hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
1c353f5245e4b041683e3947627ee7976e474c94
17,653
py
Python
tradingbot/components/broker/ig_interface.py
ajmal017/TradingBot-3
4a75562fb621d135d83b1770a3943fc9898adb66
[ "MIT" ]
null
null
null
tradingbot/components/broker/ig_interface.py
ajmal017/TradingBot-3
4a75562fb621d135d83b1770a3943fc9898adb66
[ "MIT" ]
19
2021-11-03T12:19:06.000Z
2022-03-30T12:28:29.000Z
tradingbot/components/broker/ig_interface.py
zacharyrgonzales/TradingBot
cb29c0cacc7ec655b79ad485492b265465068e84
[ "MIT" ]
1
2020-08-16T02:29:45.000Z
2020-08-16T02:29:45.000Z
import json import logging from enum import Enum from typing import Any, Dict, List, Optional import pandas import requests from ...interfaces import Market, MarketHistory, MarketMACD, Position from .. import Interval, TradeDirection, Utils from . import AccountBalances, AccountInterface, StocksInterface class IG_API_URL(Enum): """ IG REST API urls """ BASE_URI = "https://@api.ig.com/gateway/deal" DEMO_PREFIX = "demo-" SESSION = "session" ACCOUNTS = "accounts" POSITIONS = "positions" POSITIONS_OTC = "positions/otc" MARKETS = "markets" PRICES = "prices" CONFIRMS = "confirms" MARKET_NAV = "marketnavigation" WATCHLISTS = "watchlists" class IGInterface(AccountInterface, StocksInterface): """ IG broker interface class, provides functions to use the IG REST API """ api_base_url: str authenticated_headers: Dict[str, str] def initialise(self) -> None: logging.info("initialising IGInterface...") demoPrefix = ( IG_API_URL.DEMO_PREFIX.value if self._config.get_ig_use_demo_account() else "" ) self.api_base_url = IG_API_URL.BASE_URI.value.replace("@", demoPrefix) self.authenticated_headers = {} if self._config.is_paper_trading_enabled(): logging.info("Paper trading is active") if not self.authenticate(): logging.error("Authentication failed") raise RuntimeError("Unable to authenticate to IG Index. 
Check credentials") def authenticate(self) -> bool: """ Authenticate the IGInterface instance with the configured credentials """ data = { "identifier": self._config.get_credentials()["username"], "password": self._config.get_credentials()["password"], } headers = { "Content-Type": "application/json; charset=utf-8", "Accept": "application/json; charset=utf-8", "X-IG-API-KEY": self._config.get_credentials()["api_key"], "Version": "2", } url = "{}/{}".format(self.api_base_url, IG_API_URL.SESSION.value) response = requests.post(url, data=json.dumps(data), headers=headers) if response.status_code != 200: logging.debug( "Authentication returned code: {}".format(response.status_code) ) return False headers_json = dict(response.headers) try: CST_token = headers_json["CST"] x_sec_token = headers_json["X-SECURITY-TOKEN"] except Exception: return False self.authenticated_headers = { "Content-Type": "application/json; charset=utf-8", "Accept": "application/json; charset=utf-8", "X-IG-API-KEY": self._config.get_credentials()["api_key"], "CST": CST_token, "X-SECURITY-TOKEN": x_sec_token, } self.set_default_account(self._config.get_credentials()["account_id"]) return True def set_default_account(self, accountId: str) -> bool: """ Sets the IG account to use - **accountId**: String representing the accound id to use - Returns **False** if an error occurs otherwise True """ url = "{}/{}".format(self.api_base_url, IG_API_URL.SESSION.value) data = {"accountId": accountId, "defaultAccount": "True"} response = requests.put( url, data=json.dumps(data), headers=self.authenticated_headers ) if response.status_code != 200: return False logging.info("Using default account: {}".format(accountId)) return True def get_account_balances(self) -> AccountBalances: """ Returns a tuple (balance, deposit) for the account in use - Returns **(None,None)** if an error occurs otherwise (balance, deposit) """ url = "{}/{}".format(self.api_base_url, IG_API_URL.ACCOUNTS.value) d = self._http_get(url) if d 
is not None: try: for i in d["accounts"]: if str(i["accountType"]) == "SPREADBET": balance = i["balance"]["balance"] deposit = i["balance"]["deposit"] return balance, deposit except Exception: return None, None return None, None def get_open_positions(self) -> List[Position]: """ Returns the account open positions in an json object - Returns the json object returned by the IG API """ url = "{}/{}".format(self.api_base_url, IG_API_URL.POSITIONS.value) data = self._http_get(url) positions = [] for d in data["positions"]: positions.append( Position( deal_id=d["position"]["dealId"], size=d["position"]["size"], create_date=d["position"]["createdDateUTC"], direction=TradeDirection[d["position"]["direction"]], level=d["position"]["level"], limit=d["position"]["limitLevel"], stop=d["position"]["stopLevel"], currency=d["position"]["currency"], epic=d["market"]["epic"], market_id=None, ) ) return positions def get_positions_map(self) -> Dict[str, int]: """ Returns a *dict* containing the account open positions in the form {string: int} where the string is defined as 'marketId-tradeDirection' and the int is the trade size - Returns **None** if an error occurs otherwise a dict(string:int) """ positionMap: Dict[str, int] = {} for item in self.get_open_positions(): key = item.epic + "-" + item.direction.name if key in positionMap: positionMap[key] = item.size + positionMap[key] else: positionMap[key] = item.size return positionMap def get_market_info(self, epic_id: str) -> Market: """ Returns info for the given market including a price snapshot - **epic_id**: market epic as string - Returns **None** if an error occurs otherwise the json returned by IG API """ url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.MARKETS.value, epic_id) info = self._http_get(url) if "markets" in info: raise RuntimeError("Multiple matches found for epic: {}".format(epic_id)) if self._config.get_ig_controlled_risk(): info["minNormalStopOrLimitDistance"] = info["minControlledRiskStopDistance"] 
market = Market() market.epic = info["instrument"]["epic"] market.id = info["instrument"]["marketId"] market.name = info["instrument"]["name"] market.bid = info["snapshot"]["bid"] market.offer = info["snapshot"]["offer"] market.high = info["snapshot"]["high"] market.low = info["snapshot"]["low"] market.stop_distance_min = info["dealingRules"]["minNormalStopOrLimitDistance"][ "value" ] market.expiry = info["instrument"]["expiry"] return market def search_market(self, search: str) -> List[Market]: """ Returns a list of markets that matched the search string """ url = "{}/{}?searchTerm={}".format( self.api_base_url, IG_API_URL.MARKETS.value, search ) data = self._http_get(url) markets = [] if data is not None and "markets" in data: markets = [self.get_market_info(m["epic"]) for m in data["markets"]] return markets def get_prices( self, market: Market, interval: Interval, data_range: int ) -> MarketHistory: url = "{}/{}/{}/{}/{}".format( self.api_base_url, IG_API_URL.PRICES.value, market.epic, interval, data_range, ) data = self._http_get(url) if "allowance" in data: remaining_allowance = data["allowance"]["remainingAllowance"] reset_time = Utils.humanize_time(int(data["allowance"]["allowanceExpiry"])) if remaining_allowance < 100: logging.warn( "Remaining API calls left: {}".format(str(remaining_allowance)) ) logging.warn("Time to API Key reset: {}".format(str(reset_time))) dates = [] highs = [] lows = [] closes = [] volumes = [] for price in data["prices"]: dates.append(price["snapshotTimeUTC"]) highs.append(price["highPrice"]["bid"]) lows.append(price["lowPrice"]["bid"]) closes.append(price["closePrice"]["bid"]) volumes.append(float(price["lastTradedVolume"])) history = MarketHistory(market, dates, highs, lows, closes, volumes) return history def trade( self, epic_id: str, trade_direction: TradeDirection, limit: float, stop: float ) -> bool: """ Try to open a new trade for the given epic - **epic_id**: market epic as string - **trade_direction**: BUY or SELL - 
**limit**: limit level - **stop**: stop level - Returns **False** if an error occurs otherwise True """ if self._config.is_paper_trading_enabled(): logging.info( "Paper trade: {} {} with limit={} and stop={}".format( trade_direction.value, epic_id, limit, stop ) ) return True url = "{}/{}".format(self.api_base_url, IG_API_URL.POSITIONS_OTC.value) data = { "direction": trade_direction.value, "epic": epic_id, "limitLevel": limit, "orderType": self._config.get_ig_order_type(), "size": self._config.get_ig_order_size(), "expiry": self._config.get_ig_order_expiry(), "guaranteedStop": self._config.get_ig_use_g_stop(), "currencyCode": self._config.get_ig_order_currency(), "forceOpen": self._config.get_ig_order_force_open(), "stopLevel": stop, } r = requests.post( url, data=json.dumps(data), headers=self.authenticated_headers ) if r.status_code != 200: return False d = json.loads(r.text) deal_ref = d["dealReference"] if self.confirm_order(deal_ref): logging.info( "Order {} for {} confirmed with limit={} and stop={}".format( trade_direction.value, epic_id, limit, stop ) ) return True else: logging.warning( "Trade {} of {} has failed!".format(trade_direction.value, epic_id) ) return False def confirm_order(self, dealRef: str) -> bool: """ Confirm an order from a dealing reference - **dealRef**: dealing reference to confirm - Returns **False** if an error occurs otherwise True """ url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.CONFIRMS.value, dealRef) d = self._http_get(url) if d is not None: if d["reason"] != "SUCCESS": return False else: return True return False def close_position(self, position: Position) -> bool: """ Close the given market position - **position**: position json object obtained from IG API - Returns **False** if an error occurs otherwise True """ if self._config.is_paper_trading_enabled(): logging.info("Paper trade: close {} position".format(position.epic)) return True # To close we need the opposite direction direction = TradeDirection.NONE if 
position.direction is TradeDirection.BUY: direction = TradeDirection.SELL elif position.direction is TradeDirection.SELL: direction = TradeDirection.BUY else: logging.error("Wrong position direction!") return False url = "{}/{}".format(self.api_base_url, IG_API_URL.POSITIONS_OTC.value) data = { "dealId": position.deal_id, "epic": None, "expiry": None, "direction": direction.name, "size": "1", "level": None, "orderType": "MARKET", "timeInForce": None, "quoteId": None, } del_headers = dict(self.authenticated_headers) del_headers["_method"] = "DELETE" r = requests.post(url, data=json.dumps(data), headers=del_headers) if r.status_code != 200: return False d = json.loads(r.text) deal_ref = d["dealReference"] if self.confirm_order(deal_ref): logging.info("Position for {} closed".format(position.epic)) return True else: logging.error("Could not close position for {}".format(position.epic)) return False def close_all_positions(self) -> bool: """ Try to close all the account open positions. - Returns **False** if an error occurs otherwise True """ result = True try: positions = self.get_open_positions() if positions is not None: for p in positions: try: if not self.close_position(p): result = False except Exception: logging.error( "Error closing position for {}".format(p.market_id) ) result = False else: logging.error("Unable to retrieve open positions!") result = False except Exception: logging.error("Error during close all positions") result = False return result def get_account_used_perc(self) -> Optional[float]: """ Fetch the percentage of available balance is currently used - Returns the percentage of account used over total available amount """ balance, deposit = self.get_account_balances() if balance is None or deposit is None: return None return Utils.percentage(deposit, balance) def navigate_market_node(self, node_id: str) -> Dict[str, Any]: """ Navigate the market node id - Returns the json representing the market node """ url = 
"{}/{}/{}".format(self.api_base_url, IG_API_URL.MARKET_NAV.value, node_id) return self._http_get(url) def _get_watchlist(self, id: str) -> Dict[str, Any]: """ Get the watchlist info - **id**: id of the watchlist. If empty id is provided, the function returns the list of all the watchlist in the account """ url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.WATCHLISTS.value, id) return self._http_get(url) def get_markets_from_watchlist(self, name: str) -> List[Market]: """ Get the list of markets included in the watchlist - **name**: name of the watchlist """ markets = [] # Request with empty name returns list of all the watchlists all_watchlists = self._get_watchlist("") for w in all_watchlists["watchlists"]: if "name" in w and w["name"] == name: data = self._get_watchlist(w["id"]) if "markets" in data: for m in data["markets"]: markets.append(self.get_market_info(m["epic"])) break return markets def _http_get(self, url: str) -> Dict[str, Any]: """ Perform an HTTP GET request to the url. 
Return the json object returned from the API if 200 is received Return None if an error is received from the API """ self._wait_before_call(self._config.get_ig_api_timeout()) response = requests.get(url, headers=self.authenticated_headers) if response.status_code != 200: logging.error("HTTP request returned {}".format(response.status_code)) raise RuntimeError("HTTP request returned {}".format(response.status_code)) data = json.loads(response.text) if "errorCode" in data: logging.error(data["errorCode"]) raise RuntimeError(data["errorCode"]) return data def get_macd( self, market: Market, interval: Interval, data_range: int ) -> MarketMACD: data = self._macd_dataframe(market, interval) # TODO Put date instead of index numbers return MarketMACD( market, data.index, data["MACD"].values, data["Signal"].values, data["Hist"].values, ) def _macd_dataframe(self, market: Market, interval: Interval) -> pandas.DataFrame: prices = self.get_prices(market, Interval.DAY, 26) if prices is None: return None return Utils.macd_df_from_list( prices.dataframe[MarketHistory.CLOSE_COLUMN].values )
36.624481
88
0.562511
import json import logging from enum import Enum from typing import Any, Dict, List, Optional import pandas import requests from ...interfaces import Market, MarketHistory, MarketMACD, Position from .. import Interval, TradeDirection, Utils from . import AccountBalances, AccountInterface, StocksInterface class IG_API_URL(Enum): BASE_URI = "https://@api.ig.com/gateway/deal" DEMO_PREFIX = "demo-" SESSION = "session" ACCOUNTS = "accounts" POSITIONS = "positions" POSITIONS_OTC = "positions/otc" MARKETS = "markets" PRICES = "prices" CONFIRMS = "confirms" MARKET_NAV = "marketnavigation" WATCHLISTS = "watchlists" class IGInterface(AccountInterface, StocksInterface): api_base_url: str authenticated_headers: Dict[str, str] def initialise(self) -> None: logging.info("initialising IGInterface...") demoPrefix = ( IG_API_URL.DEMO_PREFIX.value if self._config.get_ig_use_demo_account() else "" ) self.api_base_url = IG_API_URL.BASE_URI.value.replace("@", demoPrefix) self.authenticated_headers = {} if self._config.is_paper_trading_enabled(): logging.info("Paper trading is active") if not self.authenticate(): logging.error("Authentication failed") raise RuntimeError("Unable to authenticate to IG Index. 
Check credentials") def authenticate(self) -> bool: data = { "identifier": self._config.get_credentials()["username"], "password": self._config.get_credentials()["password"], } headers = { "Content-Type": "application/json; charset=utf-8", "Accept": "application/json; charset=utf-8", "X-IG-API-KEY": self._config.get_credentials()["api_key"], "Version": "2", } url = "{}/{}".format(self.api_base_url, IG_API_URL.SESSION.value) response = requests.post(url, data=json.dumps(data), headers=headers) if response.status_code != 200: logging.debug( "Authentication returned code: {}".format(response.status_code) ) return False headers_json = dict(response.headers) try: CST_token = headers_json["CST"] x_sec_token = headers_json["X-SECURITY-TOKEN"] except Exception: return False self.authenticated_headers = { "Content-Type": "application/json; charset=utf-8", "Accept": "application/json; charset=utf-8", "X-IG-API-KEY": self._config.get_credentials()["api_key"], "CST": CST_token, "X-SECURITY-TOKEN": x_sec_token, } self.set_default_account(self._config.get_credentials()["account_id"]) return True def set_default_account(self, accountId: str) -> bool: url = "{}/{}".format(self.api_base_url, IG_API_URL.SESSION.value) data = {"accountId": accountId, "defaultAccount": "True"} response = requests.put( url, data=json.dumps(data), headers=self.authenticated_headers ) if response.status_code != 200: return False logging.info("Using default account: {}".format(accountId)) return True def get_account_balances(self) -> AccountBalances: url = "{}/{}".format(self.api_base_url, IG_API_URL.ACCOUNTS.value) d = self._http_get(url) if d is not None: try: for i in d["accounts"]: if str(i["accountType"]) == "SPREADBET": balance = i["balance"]["balance"] deposit = i["balance"]["deposit"] return balance, deposit except Exception: return None, None return None, None def get_open_positions(self) -> List[Position]: url = "{}/{}".format(self.api_base_url, IG_API_URL.POSITIONS.value) data = 
self._http_get(url) positions = [] for d in data["positions"]: positions.append( Position( deal_id=d["position"]["dealId"], size=d["position"]["size"], create_date=d["position"]["createdDateUTC"], direction=TradeDirection[d["position"]["direction"]], level=d["position"]["level"], limit=d["position"]["limitLevel"], stop=d["position"]["stopLevel"], currency=d["position"]["currency"], epic=d["market"]["epic"], market_id=None, ) ) return positions def get_positions_map(self) -> Dict[str, int]: positionMap: Dict[str, int] = {} for item in self.get_open_positions(): key = item.epic + "-" + item.direction.name if key in positionMap: positionMap[key] = item.size + positionMap[key] else: positionMap[key] = item.size return positionMap def get_market_info(self, epic_id: str) -> Market: url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.MARKETS.value, epic_id) info = self._http_get(url) if "markets" in info: raise RuntimeError("Multiple matches found for epic: {}".format(epic_id)) if self._config.get_ig_controlled_risk(): info["minNormalStopOrLimitDistance"] = info["minControlledRiskStopDistance"] market = Market() market.epic = info["instrument"]["epic"] market.id = info["instrument"]["marketId"] market.name = info["instrument"]["name"] market.bid = info["snapshot"]["bid"] market.offer = info["snapshot"]["offer"] market.high = info["snapshot"]["high"] market.low = info["snapshot"]["low"] market.stop_distance_min = info["dealingRules"]["minNormalStopOrLimitDistance"][ "value" ] market.expiry = info["instrument"]["expiry"] return market def search_market(self, search: str) -> List[Market]: url = "{}/{}?searchTerm={}".format( self.api_base_url, IG_API_URL.MARKETS.value, search ) data = self._http_get(url) markets = [] if data is not None and "markets" in data: markets = [self.get_market_info(m["epic"]) for m in data["markets"]] return markets def get_prices( self, market: Market, interval: Interval, data_range: int ) -> MarketHistory: url = "{}/{}/{}/{}/{}".format( 
self.api_base_url, IG_API_URL.PRICES.value, market.epic, interval, data_range, ) data = self._http_get(url) if "allowance" in data: remaining_allowance = data["allowance"]["remainingAllowance"] reset_time = Utils.humanize_time(int(data["allowance"]["allowanceExpiry"])) if remaining_allowance < 100: logging.warn( "Remaining API calls left: {}".format(str(remaining_allowance)) ) logging.warn("Time to API Key reset: {}".format(str(reset_time))) dates = [] highs = [] lows = [] closes = [] volumes = [] for price in data["prices"]: dates.append(price["snapshotTimeUTC"]) highs.append(price["highPrice"]["bid"]) lows.append(price["lowPrice"]["bid"]) closes.append(price["closePrice"]["bid"]) volumes.append(float(price["lastTradedVolume"])) history = MarketHistory(market, dates, highs, lows, closes, volumes) return history def trade( self, epic_id: str, trade_direction: TradeDirection, limit: float, stop: float ) -> bool: if self._config.is_paper_trading_enabled(): logging.info( "Paper trade: {} {} with limit={} and stop={}".format( trade_direction.value, epic_id, limit, stop ) ) return True url = "{}/{}".format(self.api_base_url, IG_API_URL.POSITIONS_OTC.value) data = { "direction": trade_direction.value, "epic": epic_id, "limitLevel": limit, "orderType": self._config.get_ig_order_type(), "size": self._config.get_ig_order_size(), "expiry": self._config.get_ig_order_expiry(), "guaranteedStop": self._config.get_ig_use_g_stop(), "currencyCode": self._config.get_ig_order_currency(), "forceOpen": self._config.get_ig_order_force_open(), "stopLevel": stop, } r = requests.post( url, data=json.dumps(data), headers=self.authenticated_headers ) if r.status_code != 200: return False d = json.loads(r.text) deal_ref = d["dealReference"] if self.confirm_order(deal_ref): logging.info( "Order {} for {} confirmed with limit={} and stop={}".format( trade_direction.value, epic_id, limit, stop ) ) return True else: logging.warning( "Trade {} of {} has failed!".format(trade_direction.value, 
epic_id) ) return False def confirm_order(self, dealRef: str) -> bool: url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.CONFIRMS.value, dealRef) d = self._http_get(url) if d is not None: if d["reason"] != "SUCCESS": return False else: return True return False def close_position(self, position: Position) -> bool: if self._config.is_paper_trading_enabled(): logging.info("Paper trade: close {} position".format(position.epic)) return True direction = TradeDirection.NONE if position.direction is TradeDirection.BUY: direction = TradeDirection.SELL elif position.direction is TradeDirection.SELL: direction = TradeDirection.BUY else: logging.error("Wrong position direction!") return False url = "{}/{}".format(self.api_base_url, IG_API_URL.POSITIONS_OTC.value) data = { "dealId": position.deal_id, "epic": None, "expiry": None, "direction": direction.name, "size": "1", "level": None, "orderType": "MARKET", "timeInForce": None, "quoteId": None, } del_headers = dict(self.authenticated_headers) del_headers["_method"] = "DELETE" r = requests.post(url, data=json.dumps(data), headers=del_headers) if r.status_code != 200: return False d = json.loads(r.text) deal_ref = d["dealReference"] if self.confirm_order(deal_ref): logging.info("Position for {} closed".format(position.epic)) return True else: logging.error("Could not close position for {}".format(position.epic)) return False def close_all_positions(self) -> bool: result = True try: positions = self.get_open_positions() if positions is not None: for p in positions: try: if not self.close_position(p): result = False except Exception: logging.error( "Error closing position for {}".format(p.market_id) ) result = False else: logging.error("Unable to retrieve open positions!") result = False except Exception: logging.error("Error during close all positions") result = False return result def get_account_used_perc(self) -> Optional[float]: balance, deposit = self.get_account_balances() if balance is None or deposit is None: return 
None return Utils.percentage(deposit, balance) def navigate_market_node(self, node_id: str) -> Dict[str, Any]: url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.MARKET_NAV.value, node_id) return self._http_get(url) def _get_watchlist(self, id: str) -> Dict[str, Any]: url = "{}/{}/{}".format(self.api_base_url, IG_API_URL.WATCHLISTS.value, id) return self._http_get(url) def get_markets_from_watchlist(self, name: str) -> List[Market]: markets = [] all_watchlists = self._get_watchlist("") for w in all_watchlists["watchlists"]: if "name" in w and w["name"] == name: data = self._get_watchlist(w["id"]) if "markets" in data: for m in data["markets"]: markets.append(self.get_market_info(m["epic"])) break return markets def _http_get(self, url: str) -> Dict[str, Any]: self._wait_before_call(self._config.get_ig_api_timeout()) response = requests.get(url, headers=self.authenticated_headers) if response.status_code != 200: logging.error("HTTP request returned {}".format(response.status_code)) raise RuntimeError("HTTP request returned {}".format(response.status_code)) data = json.loads(response.text) if "errorCode" in data: logging.error(data["errorCode"]) raise RuntimeError(data["errorCode"]) return data def get_macd( self, market: Market, interval: Interval, data_range: int ) -> MarketMACD: data = self._macd_dataframe(market, interval) return MarketMACD( market, data.index, data["MACD"].values, data["Signal"].values, data["Hist"].values, ) def _macd_dataframe(self, market: Market, interval: Interval) -> pandas.DataFrame: prices = self.get_prices(market, Interval.DAY, 26) if prices is None: return None return Utils.macd_df_from_list( prices.dataframe[MarketHistory.CLOSE_COLUMN].values )
true
true
1c353fccf7a39706f3f78621959aee8a1fa094d1
1,181
py
Python
cern_access/indico_cern_access/models/access_request_regforms.py
ThiefMaster/indico-plugins-cern
0082a66dd21ac093c1a31316d12c338f52ffe2d0
[ "MIT" ]
4
2019-02-12T05:08:56.000Z
2022-03-09T23:43:18.000Z
cern_access/indico_cern_access/models/access_request_regforms.py
ThiefMaster/indico-plugins-cern
0082a66dd21ac093c1a31316d12c338f52ffe2d0
[ "MIT" ]
40
2017-11-08T15:08:50.000Z
2022-03-28T15:09:51.000Z
cern_access/indico_cern_access/models/access_request_regforms.py
ThiefMaster/indico-plugins-cern
0082a66dd21ac093c1a31316d12c338f52ffe2d0
[ "MIT" ]
15
2017-11-08T12:35:59.000Z
2022-01-13T15:16:42.000Z
# This file is part of the CERN Indico plugins. # Copyright (C) 2014 - 2021 CERN # # The CERN Indico plugins are free software; you can redistribute # them and/or modify them under the terms of the MIT License; see # the LICENSE file for more details. from sqlalchemy.ext.hybrid import hybrid_property from indico.core.db.sqlalchemy import PyIntEnum, db from indico_cern_access.models.access_requests import CERNAccessRequestState class CERNAccessRequestRegForm(db.Model): __tablename__ = 'access_request_regforms' __table_args__ = {'schema': 'plugin_cern_access'} form_id = db.Column( db.ForeignKey('event_registration.forms.id'), primary_key=True ) request_state = db.Column( PyIntEnum(CERNAccessRequestState), nullable=False, default=CERNAccessRequestState.not_requested ) registration_form = db.relationship( 'RegistrationForm', uselist=False, lazy=False, backref=db.backref( 'cern_access_request', uselist=False ) ) @hybrid_property def is_active(self): return self.request_state != CERNAccessRequestState.withdrawn
28.119048
76
0.703641
from sqlalchemy.ext.hybrid import hybrid_property from indico.core.db.sqlalchemy import PyIntEnum, db from indico_cern_access.models.access_requests import CERNAccessRequestState class CERNAccessRequestRegForm(db.Model): __tablename__ = 'access_request_regforms' __table_args__ = {'schema': 'plugin_cern_access'} form_id = db.Column( db.ForeignKey('event_registration.forms.id'), primary_key=True ) request_state = db.Column( PyIntEnum(CERNAccessRequestState), nullable=False, default=CERNAccessRequestState.not_requested ) registration_form = db.relationship( 'RegistrationForm', uselist=False, lazy=False, backref=db.backref( 'cern_access_request', uselist=False ) ) @hybrid_property def is_active(self): return self.request_state != CERNAccessRequestState.withdrawn
true
true
1c353ffb374352e83aaa13aa425b716aa22909aa
101
py
Python
bike_app/constants.py
Himanshu372/Bike-pool-app
937936e9a09a71224f74bfd25dd52a98df35f267
[ "MIT" ]
3
2021-02-18T17:06:09.000Z
2021-09-17T07:52:58.000Z
bike_app/constants.py
Himanshu372/Bike_pool_app
937936e9a09a71224f74bfd25dd52a98df35f267
[ "MIT" ]
10
2020-07-30T17:37:29.000Z
2021-06-09T18:21:09.000Z
bike_app/constants.py
Himanshu372/Bike-pool-app
937936e9a09a71224f74bfd25dd52a98df35f267
[ "MIT" ]
1
2020-05-03T13:00:01.000Z
2020-05-03T13:00:01.000Z
MAPQUEST_URL = 'http://www.mapquestapi.com/geocoding/v1/address?key=SsRGOwY10OEkejYJYB2ACoaUiNtDDoIv'
101
101
0.851485
MAPQUEST_URL = 'http://www.mapquestapi.com/geocoding/v1/address?key=SsRGOwY10OEkejYJYB2ACoaUiNtDDoIv'
true
true
1c3540843dbf08b1d3859f0070518a82ffe0a3c8
14,738
py
Python
GAT_prediction.py
897741007/EIAA
94a071687eb387f199f9d8a82848a40ef2a6f5d7
[ "Apache-2.0" ]
null
null
null
GAT_prediction.py
897741007/EIAA
94a071687eb387f199f9d8a82848a40ef2a6f5d7
[ "Apache-2.0" ]
null
null
null
GAT_prediction.py
897741007/EIAA
94a071687eb387f199f9d8a82848a40ef2a6f5d7
[ "Apache-2.0" ]
null
null
null
import torch from torch import nn import numpy as np from Smi2Graph import SMI_grapher from time import time import os class GAT_predictor(nn.Module): def __init__(self, hidden_dim, layer_num, head_num, dict_size, dropout=0, bond_influence=0, prediction_class=2, device='cuda'): # param : bond_influence --> how to merge the influence of bond in the attention # 0: ingore the influence of bond # 1: add the embedding of bond to K # 2: mul the embedding of bond to K # param : prediction_class --> the number of classification labels # 1: the task is a regression task # n(n>1): the task is a classification task super(GAT_predictor, self).__init__() self.hidden_dim = hidden_dim self.layer_num = layer_num self.head_num = head_num self.dict_size = dict_size assert dropout < 1 self.dropout = dropout assert bond_influence in (0, 1, 2) self.bond_influence = bond_influence self.prediction_class = prediction_class self.scale = np.sqrt(hidden_dim) self.sp_dim = int(hidden_dim/head_num) self.device = device self.GAT_init() def GAT_init(self): self.atom_embedding_layer = nn.Embedding(self.dict_size, self.hidden_dim).to(self.device) # bond type: non-link, self-link, single-bond, double-bond, trible-bond, π-adj, π-meta, π-para if self.bond_influence: # N(0, 1), if the influence of bond is add to the weight matrix, the distribution of embedding ~ N(0,1) bond_embedding_weight = torch.randn(8, self.hidden_dim, device=self.device) if self.bond_influence==2: # N(1, 1), if the influence of bond is mul to the weight matrix, the distribution of embedding ~ N(1,1) bond_embedding_weight = bond_embedding_weight + torch.ones_like(bond_embedding_weight, device=self.device) self.bond_embedding_layer = nn.Embedding(8, self.hidden_dim,_weight=bond_embedding_weight).to(self.device) self.q_layers = nn.ModuleList() self.k_layers = nn.ModuleList() self.v_layers = nn.ModuleList() self.ew_layers = nn.ModuleList() self.stack_head_layers = nn.ModuleList() self.FNN_layers = nn.ModuleList() for _ in 
range(self.layer_num): self.q_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.k_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.v_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.ew_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.stack_head_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.FNN_layers.append(nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim*4, bias=False).to(self.device), nn.Linear(self.hidden_dim*4, self.hidden_dim, bias=False).to(self.device)])) self.output_layer = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device), nn.Tanh().to(self.device)) self.predictor_layer = nn.Linear(self.hidden_dim, self.prediction_class).to(self.device) if self.prediction_class > 1: self.predictor_softmax = nn.Softmax(dim=-1) def weighted_qkmm(self, q, k, w): # q.shape : [batch_size, head_num, token_len, split_dim] # k.shape : [batch_size, head_num, token_len, split_dim] # w.shape : [batch_size, head_num, token_len, token_len, split_dim] q = q.unsqueeze(-2).expand(w.shape) k = k.unsqueeze(-3).expand(w.shape) if self.bond_influence == 1: k = k + w else: k = torch.mul(k, w) sim_s = torch.mul(q, k) sim = sim_s.sum(-1) return sim def edge_attention(self, attn_out, w): # update the feature of edge according to edge and the linked atoms # new_edge : sum(Softmax(edge*atom_0, edge*edge, edge*atom_1) * (atom_0, edge, atom_1)) # atten_out.shape : [batch_size, token_len, hidden_dim] # w.shape : [batch_size, token_len, token_len, hidden_dim] ew_0 = w.unsqueeze(-2) ew_1 = w.unsqueeze(-1) ew_s = torch.matmul(ew_0, ew_1).squeeze(-2) # atom_0 * edge aw_0 = attn_out.unsqueeze(2).expand_as(w).unsqueeze(-1) aw_0s = torch.matmul(ew_0, aw_0).squeeze(-2) # atom_1 * edge aw_1 = attn_out.unsqueeze(1).expand_as(w).unsqueeze(-1) aw_1s = 
torch.matmul(ew_0, aw_1).squeeze(-2) # Softmax weight = torch.cat((ew_s, aw_0s, aw_1s), dim=-1) weight = weight/self.scale weight = nn.Softmax(dim=-1)(weight).unsqueeze(-2) # weighted sum hidden = torch.cat((ew_1, aw_0, aw_1), dim=-1).permute((0,1,2,4,3)) new_e = torch.matmul(weight, hidden).squeeze(-2) return new_e def split_head(self, tensor): # split the head into n heads k = tensor.shape if len(k) == 4: return tensor else: split_tensor = tensor.reshape(k[0], k[1], self.head_num, self.sp_dim) split_tensor = split_tensor.permute(0,2,1,3) return split_tensor def split_bond_embedding(self, bond_embedding): # split the bond embedding into n heads # bond_embedding shape : [batch_size, tensor_length, tensor_length, embedding_dim] # splited_bond_embedding shape : [batch_size, head_num, tensor_length, tensor_length, splited_dim] k = bond_embedding.shape split_bond_embedding = bond_embedding.reshape(k[0], k[1], k[2], self.head_num, self.sp_dim) split_bond_embedding = split_bond_embedding.permute(0,3,1,2,4) return split_bond_embedding def combine_head(self, tensor): # combine the split heads into one head k = tensor.shape if len(k) == 3: return tensor else: combine_tensor = tensor.permute(0,2,1,3) combine_tensor = combine_tensor.reshape(k[0], k[2], self.hidden_dim) return combine_tensor def attention_mask(self, logits, adjacency_matrix): # mask attention weights to control the range of attention # each atom can only see the atoms it linked and the atom itself # different type of bonds are regard as the same bonds multi_head_adjacency_matrix = adjacency_matrix.unsqueeze(1).expand(logits.shape) #logits[multi_head_adjacency_matrix<0.5] = -np.inf #logits[multi_head_adjacency_matrix<0.5] = -1e9 attn_scores = torch.zeros_like(multi_head_adjacency_matrix, dtype=torch.float) attn_scores[multi_head_adjacency_matrix==0] = -np.inf logits = logits + attn_scores #torch.save(logits, 'test_logits.pkl') return logits def multi_head_attention_layer(self, q, k, v, idx, bond_embedding, 
attn_mask_template): # scaled dot multi head-attention # parameter : attn_mask_template ---> the template of masking the attention if self.bond_influence: logits = self.weighted_qkmm(q, k, bond_embedding) else: logits = torch.matmul(q, k.permute(0, 1, 3, 2)) logits = logits/self.scale logits = self.attention_mask(logits, attn_mask_template) weights = nn.Softmax(dim=-1)(logits) if self.dropout: weights = nn.Dropout(self.dropout)(weights) #torch.save(weights, 'test_weights.pkl') out_pre = torch.matmul(weights, v) out = self.combine_head(out_pre) out = self.stack_head_layers[idx](out) return out def FNN(self, idx, input_batch): # feed forward layer # consists of two linear layers, the activation only occurs after the first linear layer fnn_hidden_tensor = self.FNN_layers[idx][0](input_batch) fnn_hidden_tensor = nn.ReLU()(fnn_hidden_tensor) if self.dropout: fnn_hidden_tensor = nn.Dropout(self.dropout)(fnn_hidden_tensor) fnn_output = self.FNN_layers[idx][1](fnn_hidden_tensor) return fnn_output def GAT_layer_0(self, input_batch, adj_m, idx, bond_embedding): # attention layer in Graph Attention input_batch = nn.LayerNorm(self.hidden_dim).to(self.device)(input_batch) q = self.split_head(self.q_layers[idx](input_batch)) k = self.split_head(self.k_layers[idx](input_batch)) v = self.split_head(self.v_layers[idx](input_batch)) ew = None if self.bond_influence: #ew = self.split_bond_embedding(self.ew_layers[idx](bond_embedding)) ew = self.split_bond_embedding(bond_embedding) attn_out = self.multi_head_attention_layer(q, k, v, idx, ew, adj_m) if self.dropout: attn_out = nn.Dropout(self.dropout)(attn_out) # the first residual attn_resid = attn_out + input_batch # the first layer normalization #LN_attn_resid = self.transformer_LNs[idx][0](attn_resid) LN_attn_resid = nn.LayerNorm(self.hidden_dim).to(self.device)(attn_resid) # feed forward layer FNN_out = self.FNN(idx, LN_attn_resid) if self.dropout: FNN_out = nn.Dropout(self.dropout)(FNN_out) # the second residual #FNN_resid = 
FNN_out + LN_attn_resid transformer_output = FNN_out + attn_resid # the second layer normalization #transformer_output = self.transformer_LNs[idx][1](FNN_resid) #transformer_output = nn.LayerNorm(self.hidden_dim).to(self.device)(FNN_resid) return transformer_output def GAT_layer_0e(self, input_batch, adj_m, idx, bond_embedding): # attention layer in Graph Attention # combination of node attention and edge attention input_batch = nn.LayerNorm(self.hidden_dim).to(self.device)(input_batch) q = self.split_head(self.q_layers[idx](input_batch)) k = self.split_head(self.k_layers[idx](input_batch)) v = self.split_head(self.v_layers[idx](input_batch)) ew = self.ew_layers[idx](bond_embedding) attn_out = self.multi_head_attention_layer(q, k, v, idx, self.split_bond_embedding(ew), adj_m) if self.dropout: attn_out = nn.Dropout(self.dropout)(attn_out) # the first residual attn_resid = attn_out + input_batch # the first layer normalization LN_attn_resid = nn.LayerNorm(self.hidden_dim).to(self.device)(attn_resid) # feed forward layer FNN_out = self.FNN(idx, LN_attn_resid) if self.dropout: FNN_out = nn.Dropout(self.dropout)(FNN_out) # the second residual transformer_output = FNN_out + attn_resid # edge attention #print(transformer_output.shape) #print(ew.shape) edge_output = self.edge_attention(transformer_output, ew) return transformer_output, edge_output def GAT_layer_1(self, input_batch, adj_m, idx, bond_embedding): # attention layer in Graph Attention q = self.split_head(self.q_layers[idx](input_batch)) k = self.split_head(self.k_layers[idx](input_batch)) v = self.split_head(self.v_layers[idx](input_batch)) attn_out = self.multi_head_attention_layer(q, k, v, idx, bond_embedding, adj_m) if self.dropout: attn_out = nn.Dropout(self.dropout)(attn_out) # the first residual attn_resid = attn_out + input_batch # the first layer normalization LN_attn_resid = nn.LayerNorm(self.hidden_dim).to(self.device)(attn_resid) # feed forward layer FNN_out = self.FNN(idx, LN_attn_resid) if 
self.dropout: FNN_out = nn.Dropout(self.dropout)(FNN_out) # the second residual FNN_resid = FNN_out + LN_attn_resid # the second layer normalization transformer_output = nn.LayerNorm(self.hidden_dim).to(self.device)(FNN_resid) return transformer_output def get_cls(self, gat_output): # get the [CLS] of each mol-graph in the batch cls_vector = gat_output[:, 0, :] cls_output = self.output_layer(cls_vector) cls_output = nn.Dropout(p=0.1)(cls_output) cls_output = self.predictor_layer(cls_output) if self.prediction_class > 1: cls_output = self.predictor_softmax(cls_output) #else: #cls_output = cls_output.reshape(-1) return cls_output def forward_(self, input_batch, adj_m): # forward propagation without edge attention # parameter : input_batch ---> Atom information # parameter : adj_m ---> Adjacency matrix atom_embedding = self.atom_embedding_layer(input_batch) if self.bond_influence: bond_embedding = self.bond_embedding_layer(adj_m) #bond_embedding = self.split_bond_embedding(bond_embedding) else: bond_embedding = None g_layer_output = self.GAT_layer_0(atom_embedding, adj_m, 0, bond_embedding) for layer_idx in range(1, self.layer_num): g_layer_output = self.GAT_layer_0(g_layer_output, adj_m, layer_idx, bond_embedding) prediction = self.get_cls(g_layer_output) return prediction def forward(self, input_batch, adj_m): # forward propagation with edge attention # parameter : input_batch ---> Atom information # parameter : adj_m ---> Adjacency matrix atom_embedding = self.atom_embedding_layer(input_batch) bond_embedding = self.bond_embedding_layer(adj_m) g_layer_output, g_layer_egde = self.GAT_layer_0e(atom_embedding, adj_m, 0, bond_embedding) for layer_idx in range(1, self.layer_num): g_layer_output, g_layer_egde = self.GAT_layer_0e(g_layer_output, adj_m, layer_idx, g_layer_egde) prediction = self.get_cls(g_layer_output) return prediction if __name__ == '__main__': hidden_dim = 512 layer_num = 12 head_num = 8 dropout = 0.2 bond_influence = 1 prediction_class = 2 device = 
'cuda' graph_provider = SMI_grapher(for_predictor=True, device=device) graph_provider.fit_new(batch_smis) GAT_model = GAT_predictor(hidden_dim, layer_num, head_num, grapher_provider.dict_size, dropout, bond_influence, prediction_class, device)
48.640264
141
0.649003
import torch from torch import nn import numpy as np from Smi2Graph import SMI_grapher from time import time import os class GAT_predictor(nn.Module): def __init__(self, hidden_dim, layer_num, head_num, dict_size, dropout=0, bond_influence=0, prediction_class=2, device='cuda'): super(GAT_predictor, self).__init__() self.hidden_dim = hidden_dim self.layer_num = layer_num self.head_num = head_num self.dict_size = dict_size assert dropout < 1 self.dropout = dropout assert bond_influence in (0, 1, 2) self.bond_influence = bond_influence self.prediction_class = prediction_class self.scale = np.sqrt(hidden_dim) self.sp_dim = int(hidden_dim/head_num) self.device = device self.GAT_init() def GAT_init(self): self.atom_embedding_layer = nn.Embedding(self.dict_size, self.hidden_dim).to(self.device) if self.bond_influence: bond_embedding_weight = torch.randn(8, self.hidden_dim, device=self.device) if self.bond_influence==2: bond_embedding_weight = bond_embedding_weight + torch.ones_like(bond_embedding_weight, device=self.device) self.bond_embedding_layer = nn.Embedding(8, self.hidden_dim,_weight=bond_embedding_weight).to(self.device) self.q_layers = nn.ModuleList() self.k_layers = nn.ModuleList() self.v_layers = nn.ModuleList() self.ew_layers = nn.ModuleList() self.stack_head_layers = nn.ModuleList() self.FNN_layers = nn.ModuleList() for _ in range(self.layer_num): self.q_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.k_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.v_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.ew_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.stack_head_layers.append(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device)) self.FNN_layers.append(nn.ModuleList([nn.Linear(self.hidden_dim, self.hidden_dim*4, bias=False).to(self.device), nn.Linear(self.hidden_dim*4, 
self.hidden_dim, bias=False).to(self.device)])) self.output_layer = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim, bias=False).to(self.device), nn.Tanh().to(self.device)) self.predictor_layer = nn.Linear(self.hidden_dim, self.prediction_class).to(self.device) if self.prediction_class > 1: self.predictor_softmax = nn.Softmax(dim=-1) def weighted_qkmm(self, q, k, w): q = q.unsqueeze(-2).expand(w.shape) k = k.unsqueeze(-3).expand(w.shape) if self.bond_influence == 1: k = k + w else: k = torch.mul(k, w) sim_s = torch.mul(q, k) sim = sim_s.sum(-1) return sim def edge_attention(self, attn_out, w): ew_0 = w.unsqueeze(-2) ew_1 = w.unsqueeze(-1) ew_s = torch.matmul(ew_0, ew_1).squeeze(-2) aw_0 = attn_out.unsqueeze(2).expand_as(w).unsqueeze(-1) aw_0s = torch.matmul(ew_0, aw_0).squeeze(-2) aw_1 = attn_out.unsqueeze(1).expand_as(w).unsqueeze(-1) aw_1s = torch.matmul(ew_0, aw_1).squeeze(-2) weight = torch.cat((ew_s, aw_0s, aw_1s), dim=-1) weight = weight/self.scale weight = nn.Softmax(dim=-1)(weight).unsqueeze(-2) hidden = torch.cat((ew_1, aw_0, aw_1), dim=-1).permute((0,1,2,4,3)) new_e = torch.matmul(weight, hidden).squeeze(-2) return new_e def split_head(self, tensor): k = tensor.shape if len(k) == 4: return tensor else: split_tensor = tensor.reshape(k[0], k[1], self.head_num, self.sp_dim) split_tensor = split_tensor.permute(0,2,1,3) return split_tensor def split_bond_embedding(self, bond_embedding): k = bond_embedding.shape split_bond_embedding = bond_embedding.reshape(k[0], k[1], k[2], self.head_num, self.sp_dim) split_bond_embedding = split_bond_embedding.permute(0,3,1,2,4) return split_bond_embedding def combine_head(self, tensor): k = tensor.shape if len(k) == 3: return tensor else: combine_tensor = tensor.permute(0,2,1,3) combine_tensor = combine_tensor.reshape(k[0], k[2], self.hidden_dim) return combine_tensor def attention_mask(self, logits, adjacency_matrix): multi_head_adjacency_matrix = adjacency_matrix.unsqueeze(1).expand(logits.shape) attn_scores = 
torch.zeros_like(multi_head_adjacency_matrix, dtype=torch.float) attn_scores[multi_head_adjacency_matrix==0] = -np.inf logits = logits + attn_scores return logits def multi_head_attention_layer(self, q, k, v, idx, bond_embedding, attn_mask_template): if self.bond_influence: logits = self.weighted_qkmm(q, k, bond_embedding) else: logits = torch.matmul(q, k.permute(0, 1, 3, 2)) logits = logits/self.scale logits = self.attention_mask(logits, attn_mask_template) weights = nn.Softmax(dim=-1)(logits) if self.dropout: weights = nn.Dropout(self.dropout)(weights) out_pre = torch.matmul(weights, v) out = self.combine_head(out_pre) out = self.stack_head_layers[idx](out) return out def FNN(self, idx, input_batch): fnn_hidden_tensor = self.FNN_layers[idx][0](input_batch) fnn_hidden_tensor = nn.ReLU()(fnn_hidden_tensor) if self.dropout: fnn_hidden_tensor = nn.Dropout(self.dropout)(fnn_hidden_tensor) fnn_output = self.FNN_layers[idx][1](fnn_hidden_tensor) return fnn_output def GAT_layer_0(self, input_batch, adj_m, idx, bond_embedding): input_batch = nn.LayerNorm(self.hidden_dim).to(self.device)(input_batch) q = self.split_head(self.q_layers[idx](input_batch)) k = self.split_head(self.k_layers[idx](input_batch)) v = self.split_head(self.v_layers[idx](input_batch)) ew = None if self.bond_influence: ew = self.split_bond_embedding(bond_embedding) attn_out = self.multi_head_attention_layer(q, k, v, idx, ew, adj_m) if self.dropout: attn_out = nn.Dropout(self.dropout)(attn_out) attn_resid = attn_out + input_batch LN_attn_resid = nn.LayerNorm(self.hidden_dim).to(self.device)(attn_resid) FNN_out = self.FNN(idx, LN_attn_resid) if self.dropout: FNN_out = nn.Dropout(self.dropout)(FNN_out) transformer_output = FNN_out + attn_resid return transformer_output def GAT_layer_0e(self, input_batch, adj_m, idx, bond_embedding): input_batch = nn.LayerNorm(self.hidden_dim).to(self.device)(input_batch) q = self.split_head(self.q_layers[idx](input_batch)) k = 
self.split_head(self.k_layers[idx](input_batch)) v = self.split_head(self.v_layers[idx](input_batch)) ew = self.ew_layers[idx](bond_embedding) attn_out = self.multi_head_attention_layer(q, k, v, idx, self.split_bond_embedding(ew), adj_m) if self.dropout: attn_out = nn.Dropout(self.dropout)(attn_out) attn_resid = attn_out + input_batch LN_attn_resid = nn.LayerNorm(self.hidden_dim).to(self.device)(attn_resid) FNN_out = self.FNN(idx, LN_attn_resid) if self.dropout: FNN_out = nn.Dropout(self.dropout)(FNN_out) transformer_output = FNN_out + attn_resid edge_output = self.edge_attention(transformer_output, ew) return transformer_output, edge_output def GAT_layer_1(self, input_batch, adj_m, idx, bond_embedding): q = self.split_head(self.q_layers[idx](input_batch)) k = self.split_head(self.k_layers[idx](input_batch)) v = self.split_head(self.v_layers[idx](input_batch)) attn_out = self.multi_head_attention_layer(q, k, v, idx, bond_embedding, adj_m) if self.dropout: attn_out = nn.Dropout(self.dropout)(attn_out) attn_resid = attn_out + input_batch LN_attn_resid = nn.LayerNorm(self.hidden_dim).to(self.device)(attn_resid) FNN_out = self.FNN(idx, LN_attn_resid) if self.dropout: FNN_out = nn.Dropout(self.dropout)(FNN_out) FNN_resid = FNN_out + LN_attn_resid transformer_output = nn.LayerNorm(self.hidden_dim).to(self.device)(FNN_resid) return transformer_output def get_cls(self, gat_output): cls_vector = gat_output[:, 0, :] cls_output = self.output_layer(cls_vector) cls_output = nn.Dropout(p=0.1)(cls_output) cls_output = self.predictor_layer(cls_output) if self.prediction_class > 1: cls_output = self.predictor_softmax(cls_output) return cls_output def forward_(self, input_batch, adj_m): atom_embedding = self.atom_embedding_layer(input_batch) if self.bond_influence: bond_embedding = self.bond_embedding_layer(adj_m) else: bond_embedding = None g_layer_output = self.GAT_layer_0(atom_embedding, adj_m, 0, bond_embedding) for layer_idx in range(1, self.layer_num): g_layer_output = 
self.GAT_layer_0(g_layer_output, adj_m, layer_idx, bond_embedding) prediction = self.get_cls(g_layer_output) return prediction def forward(self, input_batch, adj_m): atom_embedding = self.atom_embedding_layer(input_batch) bond_embedding = self.bond_embedding_layer(adj_m) g_layer_output, g_layer_egde = self.GAT_layer_0e(atom_embedding, adj_m, 0, bond_embedding) for layer_idx in range(1, self.layer_num): g_layer_output, g_layer_egde = self.GAT_layer_0e(g_layer_output, adj_m, layer_idx, g_layer_egde) prediction = self.get_cls(g_layer_output) return prediction if __name__ == '__main__': hidden_dim = 512 layer_num = 12 head_num = 8 dropout = 0.2 bond_influence = 1 prediction_class = 2 device = 'cuda' graph_provider = SMI_grapher(for_predictor=True, device=device) graph_provider.fit_new(batch_smis) GAT_model = GAT_predictor(hidden_dim, layer_num, head_num, grapher_provider.dict_size, dropout, bond_influence, prediction_class, device)
true
true
1c35411f49c68e3912b5bdc905b7cf0c05c5d8d1
12,490
py
Python
differentiable_robot_model/spatial_vector_algebra.py
Neotriple/differentiable-robot-model
7b3887b5d80ad7d99379962f9f46aabfd4a1c46d
[ "MIT" ]
null
null
null
differentiable_robot_model/spatial_vector_algebra.py
Neotriple/differentiable-robot-model
7b3887b5d80ad7d99379962f9f46aabfd4a1c46d
[ "MIT" ]
null
null
null
differentiable_robot_model/spatial_vector_algebra.py
Neotriple/differentiable-robot-model
7b3887b5d80ad7d99379962f9f46aabfd4a1c46d
[ "MIT" ]
null
null
null
from __future__ import annotations import torch import hydra import math from . import utils from .utils import cross_product def x_rot(angle): if len(angle.shape) == 0: angle = angle.unsqueeze(0) angle = utils.convert_into_at_least_2d_pytorch_tensor(angle).squeeze(1) batch_size = angle.shape[0] R = torch.zeros((batch_size, 3, 3)) R[:, 0, 0] = torch.ones(batch_size) R[:, 1, 1] = torch.cos(angle) R[:, 1, 2] = -torch.sin(angle) R[:, 2, 1] = torch.sin(angle) R[:, 2, 2] = torch.cos(angle) return R def y_rot(angle): if len(angle.shape) == 0: angle = angle.unsqueeze(0) angle = utils.convert_into_at_least_2d_pytorch_tensor(angle).squeeze(1) batch_size = angle.shape[0] R = torch.zeros((batch_size, 3, 3)) R[:, 0, 0] = torch.cos(angle) R[:, 0, 2] = torch.sin(angle) R[:, 1, 1] = torch.ones(batch_size) R[:, 2, 0] = -torch.sin(angle) R[:, 2, 2] = torch.cos(angle) return R def z_rot(angle): if len(angle.shape) == 0: angle = angle.unsqueeze(0) angle = utils.convert_into_at_least_2d_pytorch_tensor(angle).squeeze(1) batch_size = angle.shape[0] R = torch.zeros((batch_size, 3, 3)) R[:, 0, 0] = torch.cos(angle) R[:, 0, 1] = -torch.sin(angle) R[:, 1, 0] = torch.sin(angle) R[:, 1, 1] = torch.cos(angle) R[:, 2, 2] = torch.ones(batch_size) return R class CoordinateTransform(object): def __init__(self, rot=None, trans=None): if rot is None: self._rot = torch.eye(3) else: self._rot = rot if len(self._rot.shape) == 2: self._rot = self._rot.unsqueeze(0) if trans is None: self._trans = torch.zeros(3) else: self._trans = trans if len(self._trans.shape) == 1: self._trans = self._trans.unsqueeze(0) def set_translation(self, t): self._trans = t if len(self._trans.shape) == 1: self._trans = self._trans.unsqueeze(0) return def set_rotation(self, rot): self._rot = rot if len(self._rot.shape) == 2: self._rot = self._rot.unsqueeze(0) return def rotation(self): return self._rot def translation(self): return self._trans def inverse(self): rot_transpose = self._rot.transpose(-2, -1) return 
CoordinateTransform(rot_transpose, -(rot_transpose @ self._trans.unsqueeze(2)).squeeze(2)) def multiply_transform(self, coordinate_transform): new_rot = self._rot @ coordinate_transform.rotation() new_trans = (self._rot @ coordinate_transform.translation().unsqueeze(2)).squeeze(2) + self._trans return CoordinateTransform(new_rot, new_trans) def trans_cross_rot(self): return utils.vector3_to_skew_symm_matrix(self._trans) @ self._rot def get_quaternion(self): batch_size = self._rot.shape[0] M = torch.zeros((batch_size, 4, 4)).to(self._rot.device) M[:, :3, :3] = self._rot M[:, :3, 3] = self._trans M[:, 3, 3] = 1 q = torch.empty((batch_size, 4)).to(self._rot.device) t = torch.einsum('bii->b', M) #torch.trace(M) for n in range(batch_size): tn = t[n] if tn > M[n, 3, 3]: q[n, 3] = tn q[n, 2] = M[n, 1, 0] - M[n, 0, 1] q[n, 1] = M[n, 0, 2] - M[n, 2, 0] q[n, 0] = M[n, 2, 1] - M[n, 1, 2] else: i, j, k = 0, 1, 2 if M[n, 1, 1] > M[n, 0, 0]: i, j, k = 1, 2, 0 if M[n, 2, 2] > M[n, i, i]: i, j, k = 2, 0, 1 tn = M[n, i, i] - (M[n, j, j] + M[n, k, k]) + M[n, 3, 3] q[n, i] = tn q[n, j] = M[n, i, j] + M[n, j, i] q[n, k] = M[n, k, i] + M[n, i, k] q[n, 3] = M[n, k, j] - M[n, j, k] #q = q[[3, 0, 1, 2]] q[n, :] *= 0.5 / math.sqrt(tn * M[n, 3, 3]) return q def to_matrix(self): mat = torch.zeros((6, 6)) t = torch.zeros((3, 3)) t[0, 1] = -self._trans[0, 2] t[0, 2] = self._trans[0, 1] t[1, 0] = self._trans[0, 2] t[1, 2] = -self._trans[0, 0] t[2, 0] = -self._trans[0, 1] t[2, 1] = self._trans[0, 0] _Erx = self._rot[0].transpose(-2, 1).matmul(t) mat[:3, :3] = self._rot[0].transpose(-2, 1) mat[3:, 0:3] = -_Erx mat[3:, 3:] = self._rot[0].transpose(-2, 1) return mat def to_matrix_transpose(self): mat = torch.zeros((6, 6)) t = torch.zeros((3, 3)) t[0, 1] = -self._trans[0, 2] t[0, 2] = self._trans[0, 1] t[1, 0] = self._trans[0, 2] t[1, 2] = -self._trans[0, 0] t[2, 0] = -self._trans[0, 1] t[2, 1] = self._trans[0, 0] _Erx = self._rot[0].matmul(t) mat[:3, :3] = self._rot[0].transpose(1, 0) mat[3:, 0:3] 
= -_Erx.transpose(1, 0) mat[3:, 3:] = self._rot[0].transpose(1, 0) return mat class SpatialMotionVec(object): def __init__(self, lin_motion: torch.Tensor = torch.zeros((1, 3)), ang_motion: torch.Tensor = torch.zeros((1, 3)) ): self.lin = lin_motion self.ang = ang_motion def add_motion_vec(self, smv: SpatialMotionVec) -> SpatialMotionVec: r""" Args: smv: spatial motion vector Returns: the sum of motion vectors """ return SpatialMotionVec(self.lin + smv.lin, self.ang + smv.ang) def cross_motion_vec(self, smv: SpatialMotionVec) -> SpatialMotionVec: r""" Args: smv: spatial motion vector Returns: the cross product between motion vectors """ new_ang = cross_product(self.ang, smv.ang) new_lin = cross_product(self.ang, smv.lin) + cross_product(self.lin, smv.ang) return SpatialMotionVec(new_lin, new_ang) def cross_force_vec(self, sfv: SpatialForceVec) -> SpatialForceVec: r""" Args: sfv: spatial force vector Returns: the cross product between motion (self) and force vector """ new_ang = cross_product(self.ang, sfv.ang) + cross_product(self.lin, sfv.lin) new_lin = cross_product(self.ang, sfv.lin) return SpatialForceVec(new_lin, new_ang) def transform(self, transform: CoordinateTransform) -> SpatialMotionVec: r""" Args: transform: a coordinate transform object Returns: the motion vector (self) transformed by the coordinate transform """ new_ang = (transform.rotation() @ self.ang.unsqueeze(2)).squeeze(2) new_lin = (transform.trans_cross_rot() @ self.ang.unsqueeze(2)).squeeze(2) new_lin += (transform.rotation() @ self.lin.unsqueeze(2)).squeeze(2) return SpatialMotionVec(new_lin, new_ang) def get_vector(self): return torch.cat([self.ang, self.lin], dim=1) def multiply(self, v): batch_size = self.lin.shape[0] return SpatialForceVec(self.lin*v.view(batch_size, 1), self.ang*v.view(batch_size, 1)) def dot(self, smv): batch_size, n_d = self.ang.shape tmp1 = torch.bmm(self.ang.view(batch_size, 1, n_d), smv.ang.view(batch_size, n_d, 1)).squeeze() tmp2 = 
torch.bmm(self.lin.view(batch_size, 1, n_d), smv.lin.view(batch_size, n_d, 1)).squeeze() return tmp1 + tmp2 #return self.ang[0].dot(smv.ang[0]) + self.lin[0].dot(smv.lin[0]) class SpatialForceVec(object): def __init__(self, lin_force: torch.Tensor = torch.zeros((1, 3)), ang_force: torch.Tensor = torch.zeros((1, 3)) ): self.lin = lin_force self.ang = ang_force def add_force_vec(self, sfv: SpatialForceVec) -> SpatialForceVec: r""" Args: sfv: spatial force vector Returns: the sum of force vectors """ return SpatialForceVec(self.lin + sfv.lin, self.ang + sfv.ang) def transform(self, transform: CoordinateTransform) -> SpatialForceVec: r""" Args: transform: a coordinate transform object Returns: the force vector (self) transformed by the coordinate transform """ new_lin = (transform.rotation() @ self.lin.unsqueeze(2)).squeeze(2) new_ang = (transform.trans_cross_rot() @ self.lin.unsqueeze(2)).squeeze(2) new_ang += (transform.rotation() @ self.ang.unsqueeze(2)).squeeze(2) return SpatialForceVec(new_lin, new_ang) def get_vector(self): return torch.cat([self.ang, self.lin], dim=1) def multiply(self, v): batch_size = self.lin.shape[0] return SpatialForceVec(self.lin*v.view(batch_size, 1), self.ang*v.view(batch_size, 1)) def dot(self, smv): #return self.ang[0].dot(smv.ang[0]) + self.lin[0].dot(smv.lin[0]) batch_size, n_d = self.ang.shape tmp1 = torch.bmm(self.ang.view(batch_size, 1, n_d), smv.ang.view(batch_size, n_d, 1)).squeeze() tmp2 = torch.bmm(self.lin.view(batch_size, 1, n_d), smv.lin.view(batch_size, n_d, 1)).squeeze() return tmp1 + tmp2 class DifferentiableSpatialRigidBodyInertia(torch.nn.Module): def __init__(self, rigid_body_params): super().__init__() self.mass = rigid_body_params["mass"] self.com = rigid_body_params["com"] self.inertia_mat = rigid_body_params["inertia_mat"] def _get_parameter_values(self): return self.mass, self.com, self.inertia_mat def multiply_motion_vec(self, smv): mass, com, inertia_mat = self._get_parameter_values() mcom = com * mass 
com_skew_symm_mat = utils.vector3_to_skew_symm_matrix(com) inertia = inertia_mat + mass * ( com_skew_symm_mat @ com_skew_symm_mat.transpose(-2, -1) ) batch_size = smv.lin.shape[0] new_lin_force = mass * smv.lin - utils.cross_product( mcom.repeat(batch_size, 1), smv.ang ) new_ang_force = (inertia.repeat(batch_size, 1, 1) @ smv.ang.unsqueeze(2)).squeeze( 2 ) + utils.cross_product(mcom.repeat(batch_size, 1), smv.lin) return SpatialForceVec(new_lin_force, new_ang_force) def get_spatial_mat(self): mass, com, inertia_mat = self._get_parameter_values() mcom = mass * com com_skew_symm_mat = utils.vector3_to_skew_symm_matrix(com) inertia = inertia_mat + mass * ( com_skew_symm_mat @ com_skew_symm_mat.transpose(-2, -1) ) mat = torch.zeros((6, 6)) mat[:3, :3] = inertia mat[3, 0] = 0; mat[3, 1] = mcom[0, 2]; mat[3, 2] = -mcom[0, 1] mat[4, 0] = -mcom[0, 2]; mat[4, 1] = 0.0; mat[4, 2] = mcom[0, 0] mat[5, 0] = mcom[0, 1]; mat[5, 1] = -mcom[0, 0]; mat[5, 2] = 0.0 mat[0, 3] = 0; mat[0, 4] = -mcom[0, 2]; mat[0, 5] = mcom[0, 1] mat[1, 3] = mcom[0, 2]; mat[1, 4] = 0.0; mat[1, 5] = -mcom[0, 0] mat[2, 3] = -mcom[0, 1]; mat[2, 4] = mcom[0, 0]; mat[2, 5] = 0.0 mat[3, 3] = mass mat[4, 4] = mass mat[5, 5] = mass return mat class LearnableSpatialRigidBodyInertia(DifferentiableSpatialRigidBodyInertia): def __init__(self, learnable_rigid_body_config, rigid_body_params): super().__init__(rigid_body_params) # we overwrite dynamics parameters if "mass" in learnable_rigid_body_config.learnable_dynamics_params: self.mass_fn = hydra.utils.instantiate( learnable_rigid_body_config.mass_parametrization ) else: self.mass_fn = lambda: self.mass if "com" in learnable_rigid_body_config.learnable_dynamics_params: self.com_fn = hydra.utils.instantiate( learnable_rigid_body_config.com_parametrization ) else: self.com_fn = lambda: self.com if "inertia_mat" in learnable_rigid_body_config.learnable_dynamics_params: self.inertia_mat_fn = hydra.utils.instantiate(learnable_rigid_body_config.inertia_parametrization) 
else: self.inertia_mat_fn = lambda: self.inertia_mat def _get_parameter_values(self): return self.mass_fn(), self.com_fn(), self.inertia_mat_fn()
35.282486
110
0.569656
from __future__ import annotations import torch import hydra import math from . import utils from .utils import cross_product def x_rot(angle): if len(angle.shape) == 0: angle = angle.unsqueeze(0) angle = utils.convert_into_at_least_2d_pytorch_tensor(angle).squeeze(1) batch_size = angle.shape[0] R = torch.zeros((batch_size, 3, 3)) R[:, 0, 0] = torch.ones(batch_size) R[:, 1, 1] = torch.cos(angle) R[:, 1, 2] = -torch.sin(angle) R[:, 2, 1] = torch.sin(angle) R[:, 2, 2] = torch.cos(angle) return R def y_rot(angle): if len(angle.shape) == 0: angle = angle.unsqueeze(0) angle = utils.convert_into_at_least_2d_pytorch_tensor(angle).squeeze(1) batch_size = angle.shape[0] R = torch.zeros((batch_size, 3, 3)) R[:, 0, 0] = torch.cos(angle) R[:, 0, 2] = torch.sin(angle) R[:, 1, 1] = torch.ones(batch_size) R[:, 2, 0] = -torch.sin(angle) R[:, 2, 2] = torch.cos(angle) return R def z_rot(angle): if len(angle.shape) == 0: angle = angle.unsqueeze(0) angle = utils.convert_into_at_least_2d_pytorch_tensor(angle).squeeze(1) batch_size = angle.shape[0] R = torch.zeros((batch_size, 3, 3)) R[:, 0, 0] = torch.cos(angle) R[:, 0, 1] = -torch.sin(angle) R[:, 1, 0] = torch.sin(angle) R[:, 1, 1] = torch.cos(angle) R[:, 2, 2] = torch.ones(batch_size) return R class CoordinateTransform(object): def __init__(self, rot=None, trans=None): if rot is None: self._rot = torch.eye(3) else: self._rot = rot if len(self._rot.shape) == 2: self._rot = self._rot.unsqueeze(0) if trans is None: self._trans = torch.zeros(3) else: self._trans = trans if len(self._trans.shape) == 1: self._trans = self._trans.unsqueeze(0) def set_translation(self, t): self._trans = t if len(self._trans.shape) == 1: self._trans = self._trans.unsqueeze(0) return def set_rotation(self, rot): self._rot = rot if len(self._rot.shape) == 2: self._rot = self._rot.unsqueeze(0) return def rotation(self): return self._rot def translation(self): return self._trans def inverse(self): rot_transpose = self._rot.transpose(-2, -1) return 
CoordinateTransform(rot_transpose, -(rot_transpose @ self._trans.unsqueeze(2)).squeeze(2)) def multiply_transform(self, coordinate_transform): new_rot = self._rot @ coordinate_transform.rotation() new_trans = (self._rot @ coordinate_transform.translation().unsqueeze(2)).squeeze(2) + self._trans return CoordinateTransform(new_rot, new_trans) def trans_cross_rot(self): return utils.vector3_to_skew_symm_matrix(self._trans) @ self._rot def get_quaternion(self): batch_size = self._rot.shape[0] M = torch.zeros((batch_size, 4, 4)).to(self._rot.device) M[:, :3, :3] = self._rot M[:, :3, 3] = self._trans M[:, 3, 3] = 1 q = torch.empty((batch_size, 4)).to(self._rot.device) t = torch.einsum('bii->b', M) for n in range(batch_size): tn = t[n] if tn > M[n, 3, 3]: q[n, 3] = tn q[n, 2] = M[n, 1, 0] - M[n, 0, 1] q[n, 1] = M[n, 0, 2] - M[n, 2, 0] q[n, 0] = M[n, 2, 1] - M[n, 1, 2] else: i, j, k = 0, 1, 2 if M[n, 1, 1] > M[n, 0, 0]: i, j, k = 1, 2, 0 if M[n, 2, 2] > M[n, i, i]: i, j, k = 2, 0, 1 tn = M[n, i, i] - (M[n, j, j] + M[n, k, k]) + M[n, 3, 3] q[n, i] = tn q[n, j] = M[n, i, j] + M[n, j, i] q[n, k] = M[n, k, i] + M[n, i, k] q[n, 3] = M[n, k, j] - M[n, j, k] q[n, :] *= 0.5 / math.sqrt(tn * M[n, 3, 3]) return q def to_matrix(self): mat = torch.zeros((6, 6)) t = torch.zeros((3, 3)) t[0, 1] = -self._trans[0, 2] t[0, 2] = self._trans[0, 1] t[1, 0] = self._trans[0, 2] t[1, 2] = -self._trans[0, 0] t[2, 0] = -self._trans[0, 1] t[2, 1] = self._trans[0, 0] _Erx = self._rot[0].transpose(-2, 1).matmul(t) mat[:3, :3] = self._rot[0].transpose(-2, 1) mat[3:, 0:3] = -_Erx mat[3:, 3:] = self._rot[0].transpose(-2, 1) return mat def to_matrix_transpose(self): mat = torch.zeros((6, 6)) t = torch.zeros((3, 3)) t[0, 1] = -self._trans[0, 2] t[0, 2] = self._trans[0, 1] t[1, 0] = self._trans[0, 2] t[1, 2] = -self._trans[0, 0] t[2, 0] = -self._trans[0, 1] t[2, 1] = self._trans[0, 0] _Erx = self._rot[0].matmul(t) mat[:3, :3] = self._rot[0].transpose(1, 0) mat[3:, 0:3] = -_Erx.transpose(1, 0) mat[3:, 3:] = 
self._rot[0].transpose(1, 0) return mat class SpatialMotionVec(object): def __init__(self, lin_motion: torch.Tensor = torch.zeros((1, 3)), ang_motion: torch.Tensor = torch.zeros((1, 3)) ): self.lin = lin_motion self.ang = ang_motion def add_motion_vec(self, smv: SpatialMotionVec) -> SpatialMotionVec: return SpatialMotionVec(self.lin + smv.lin, self.ang + smv.ang) def cross_motion_vec(self, smv: SpatialMotionVec) -> SpatialMotionVec: new_ang = cross_product(self.ang, smv.ang) new_lin = cross_product(self.ang, smv.lin) + cross_product(self.lin, smv.ang) return SpatialMotionVec(new_lin, new_ang) def cross_force_vec(self, sfv: SpatialForceVec) -> SpatialForceVec: new_ang = cross_product(self.ang, sfv.ang) + cross_product(self.lin, sfv.lin) new_lin = cross_product(self.ang, sfv.lin) return SpatialForceVec(new_lin, new_ang) def transform(self, transform: CoordinateTransform) -> SpatialMotionVec: new_ang = (transform.rotation() @ self.ang.unsqueeze(2)).squeeze(2) new_lin = (transform.trans_cross_rot() @ self.ang.unsqueeze(2)).squeeze(2) new_lin += (transform.rotation() @ self.lin.unsqueeze(2)).squeeze(2) return SpatialMotionVec(new_lin, new_ang) def get_vector(self): return torch.cat([self.ang, self.lin], dim=1) def multiply(self, v): batch_size = self.lin.shape[0] return SpatialForceVec(self.lin*v.view(batch_size, 1), self.ang*v.view(batch_size, 1)) def dot(self, smv): batch_size, n_d = self.ang.shape tmp1 = torch.bmm(self.ang.view(batch_size, 1, n_d), smv.ang.view(batch_size, n_d, 1)).squeeze() tmp2 = torch.bmm(self.lin.view(batch_size, 1, n_d), smv.lin.view(batch_size, n_d, 1)).squeeze() return tmp1 + tmp2 class SpatialForceVec(object): def __init__(self, lin_force: torch.Tensor = torch.zeros((1, 3)), ang_force: torch.Tensor = torch.zeros((1, 3)) ): self.lin = lin_force self.ang = ang_force def add_force_vec(self, sfv: SpatialForceVec) -> SpatialForceVec: return SpatialForceVec(self.lin + sfv.lin, self.ang + sfv.ang) def transform(self, transform: CoordinateTransform) 
-> SpatialForceVec: new_lin = (transform.rotation() @ self.lin.unsqueeze(2)).squeeze(2) new_ang = (transform.trans_cross_rot() @ self.lin.unsqueeze(2)).squeeze(2) new_ang += (transform.rotation() @ self.ang.unsqueeze(2)).squeeze(2) return SpatialForceVec(new_lin, new_ang) def get_vector(self): return torch.cat([self.ang, self.lin], dim=1) def multiply(self, v): batch_size = self.lin.shape[0] return SpatialForceVec(self.lin*v.view(batch_size, 1), self.ang*v.view(batch_size, 1)) def dot(self, smv): batch_size, n_d = self.ang.shape tmp1 = torch.bmm(self.ang.view(batch_size, 1, n_d), smv.ang.view(batch_size, n_d, 1)).squeeze() tmp2 = torch.bmm(self.lin.view(batch_size, 1, n_d), smv.lin.view(batch_size, n_d, 1)).squeeze() return tmp1 + tmp2 class DifferentiableSpatialRigidBodyInertia(torch.nn.Module): def __init__(self, rigid_body_params): super().__init__() self.mass = rigid_body_params["mass"] self.com = rigid_body_params["com"] self.inertia_mat = rigid_body_params["inertia_mat"] def _get_parameter_values(self): return self.mass, self.com, self.inertia_mat def multiply_motion_vec(self, smv): mass, com, inertia_mat = self._get_parameter_values() mcom = com * mass com_skew_symm_mat = utils.vector3_to_skew_symm_matrix(com) inertia = inertia_mat + mass * ( com_skew_symm_mat @ com_skew_symm_mat.transpose(-2, -1) ) batch_size = smv.lin.shape[0] new_lin_force = mass * smv.lin - utils.cross_product( mcom.repeat(batch_size, 1), smv.ang ) new_ang_force = (inertia.repeat(batch_size, 1, 1) @ smv.ang.unsqueeze(2)).squeeze( 2 ) + utils.cross_product(mcom.repeat(batch_size, 1), smv.lin) return SpatialForceVec(new_lin_force, new_ang_force) def get_spatial_mat(self): mass, com, inertia_mat = self._get_parameter_values() mcom = mass * com com_skew_symm_mat = utils.vector3_to_skew_symm_matrix(com) inertia = inertia_mat + mass * ( com_skew_symm_mat @ com_skew_symm_mat.transpose(-2, -1) ) mat = torch.zeros((6, 6)) mat[:3, :3] = inertia mat[3, 0] = 0; mat[3, 1] = mcom[0, 2]; mat[3, 2] = 
-mcom[0, 1] mat[4, 0] = -mcom[0, 2]; mat[4, 1] = 0.0; mat[4, 2] = mcom[0, 0] mat[5, 0] = mcom[0, 1]; mat[5, 1] = -mcom[0, 0]; mat[5, 2] = 0.0 mat[0, 3] = 0; mat[0, 4] = -mcom[0, 2]; mat[0, 5] = mcom[0, 1] mat[1, 3] = mcom[0, 2]; mat[1, 4] = 0.0; mat[1, 5] = -mcom[0, 0] mat[2, 3] = -mcom[0, 1]; mat[2, 4] = mcom[0, 0]; mat[2, 5] = 0.0 mat[3, 3] = mass mat[4, 4] = mass mat[5, 5] = mass return mat class LearnableSpatialRigidBodyInertia(DifferentiableSpatialRigidBodyInertia): def __init__(self, learnable_rigid_body_config, rigid_body_params): super().__init__(rigid_body_params) if "mass" in learnable_rigid_body_config.learnable_dynamics_params: self.mass_fn = hydra.utils.instantiate( learnable_rigid_body_config.mass_parametrization ) else: self.mass_fn = lambda: self.mass if "com" in learnable_rigid_body_config.learnable_dynamics_params: self.com_fn = hydra.utils.instantiate( learnable_rigid_body_config.com_parametrization ) else: self.com_fn = lambda: self.com if "inertia_mat" in learnable_rigid_body_config.learnable_dynamics_params: self.inertia_mat_fn = hydra.utils.instantiate(learnable_rigid_body_config.inertia_parametrization) else: self.inertia_mat_fn = lambda: self.inertia_mat def _get_parameter_values(self): return self.mass_fn(), self.com_fn(), self.inertia_mat_fn()
true
true
1c3543996fec7869a3dff75efacb00046a33f2f5
18,537
py
Python
cryolo_relion_wrapper/cryolo_wrapper_library.py
Luger-Lab/Cryo-EM
3eb62434181d6ce438190230758f6ce0e6b20af8
[ "MIT" ]
1
2020-12-23T20:14:16.000Z
2020-12-23T20:14:16.000Z
cryolo_relion_wrapper/cryolo_wrapper_library.py
Luger-Lab/Cryo-EM
3eb62434181d6ce438190230758f6ce0e6b20af8
[ "MIT" ]
null
null
null
cryolo_relion_wrapper/cryolo_wrapper_library.py
Luger-Lab/Cryo-EM
3eb62434181d6ce438190230758f6ce0e6b20af8
[ "MIT" ]
null
null
null
#!/usr/bin/env python ''' Original version written by Samuel Bowerman (6/22/2021) ''' import os,sys,glob,datetime,itertools,subprocess,shutil import numpy as np def do_denoise(args,logfile): starttime = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") logfile.write("Beginning 'Denoise' task at "+starttime+"\n\n") mics_star_path = args.in_mics #Extract list of .mrc files from input .star file mics_star_file = open(mics_star_path,'r') star_lines = mics_star_file.readlines() mics_star_file.close() #We will also use the mic_file_list later to build the output .star file mic_file_list = [] star_line_list= [] denoised_out_star = open(os.path.join(args.o,"micrographs_denoised.star"),'w') denoised_star_header_complete = False for line in star_lines: first_column = line.split(" ")[0] #the mrc image should consistently be the first thing in the list, if the line describes a micrograph (either from MotCorr or from CtfFind job if "MotionCorr" in first_column: #Will always be the first column, if in a correct row mic_file_list.append(first_column) if not denoised_star_header_complete: denoised_out_star.write("_rlnDenoisedMicrograph #"+str(rln_idx+1)+"\n") denoised_star_header_complete = True #We don't want to include the "new line" character (the "-1"th index) because we are adding to the line #denoised_out_star.write(line[:-1]+"\t") star_line_list.append(line[:-1]+" ") else: denoised_out_star.write(line) if "_rln" in line: #Need to keep track of how many _rlnColumns we have rln_idx = int(line.split(" ")[-2].replace("#","")) #The "-1" idx is the new line character, so the "-2" idx will be "#[rln_idx]" string #Randomly pick args.nmic number of files to denoise etc. 
mic_idx = np.arange(len(mic_file_list)) denoise_idxs = np.random.choice(mic_idx,size=args.nmic,replace=False) denoise_list = np.array(mic_file_list)[denoise_idxs] denoise_star_lines = np.array(star_line_list)[denoise_idxs] for idx in range(args.nmic): mic_basename = os.path.basename(denoise_list[idx]) denoised_mic = os.path.join(args.o,"denoised/for_picking/"+mic_basename) denoised_out_star.write(denoise_star_lines[idx]+denoised_mic+"\n") denoised_out_star.close() #Create a folder to house the mic list for denoising/manual picking for_pick_folder = os.path.join(args.o,"for_picking") if not os.path.isdir(for_pick_folder): logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": creating folder = "+for_pick_folder+"\n") os.mkdir(for_pick_folder) #If the folder does exist, get rid of old symlinks else: for FILE in glob.glob(os.path.join(for_pick_folder,"*.*")): os.unlink(FILE) #This folder will hold the post-denoising mics after_denoise_folder = os.path.join(args.o,"denoised") if not os.path.isdir(after_denoise_folder): logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": creating folder = "+after_denoise_folder+"\n") os.mkdir(after_denoise_folder) #put symbolic links to the randomly-selected micrographs in to the "for_picking" folder for FILE in denoise_list: #Call os.path.basename to remove the prefix path (maintained by glob.glob call) link_path = os.path.join(for_pick_folder,os.path.basename(FILE)) logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": creating symbolic link for micrograph: "+FILE+" -> "+link_path) os.symlink(os.path.join(os.getcwd(),FILE),link_path) #Now that the preparation work has been done, start denoising micrographs function_call = "janni_denoise.py denoise -ol 24 -bs 4 -g 0 "+for_pick_folder+" " +after_denoise_folder+" "+args.n_model logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": Sending function call = "+function_call+"\n") #Need to split the program and inputs in 
subprocess call (needs to be list, not string) function_call_split = function_call.split(" ") subprocess.call(function_call_split) #Need to make pipeline .star file information out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"micrographs_denoised.star")+"\t1\n") out_nodes_star.close() def do_manual_pick(args,logfile): starttime = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") logfile.write("Beginning 'Manual Pick' task at "+starttime+"\n\n") logfile.write("Opening input micrographs star file: "+args.in_mics+"\n") denoised_star = open(args.in_mics,'r') denoised_star_lines = denoised_star.readlines() denoised_star.close() #Generate a folder within the job to link the mics for picking denoise_mic_folder = os.path.join(args.o,"denoised_folder") raw_mic_folder= os.path.join(args.o,"raw_image_folder") manual_pick_folder = os.path.join(args.o,"boxes") #If folders don't exist, then make them logfile.write("Generating folders for symbolic links and particle picks: %s; %s; %s\n" % (denoise_mic_folder, raw_mic_folder, manual_pick_folder)) if not os.path.isdir(denoise_mic_folder): os.mkdir(denoise_mic_folder) if not os.path.isdir(raw_mic_folder): os.mkdir(raw_mic_folder) if not os.path.isdir(manual_pick_folder): os.mkdir(manual_pick_folder) #look for lines containing micrograph information, denoised images will be final column logfile.write("Determining raw and denoised micrographs for manual picking from "+args.in_mics+"\n") for LINE in denoised_star_lines: LINE = LINE.replace("\n","").replace("\t","") #Things got weird around the new-line character, so I just got rid of it if "MotionCorr" in LINE: #The micrograph lines will just bet the ones with "MotionCorr" in the first column split_line = LINE.split(" ") #The original 
motion-corrected file will be the first column, the denoised version the last column link_path_prefix = os.getcwd() raw_rel_path = split_line[0] raw_basename = os.path.basename(raw_rel_path) raw_link_src = os.path.join(link_path_prefix,raw_rel_path) raw_link_dest= os.path.join(raw_mic_folder,raw_basename) currtime= datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") if not os.path.exists(raw_link_dest): if os.path.exists(raw_link_src): logfile.write(currtime+" = creating symbolic link "+raw_link_src+" -> "+raw_link_dest+"\n") os.symlink(raw_link_src,raw_link_dest) else: logfile.write(currtime+" = could not find source for symbolic link ("+raw_link_src+" -> "+raw_link_dest+")\n") else: logfile.write(currtime+" = Symbolic link already present at destination ("+raw_link_dest+")\n") denoise_rel_path = split_line[-1] #For some reason, "-1" isn't the new line character for .star files written through this python wrapper? denoise_basename = os.path.basename(denoise_rel_path) denoise_link_src = os.path.join(link_path_prefix,denoise_rel_path) denoise_link_dest= os.path.join(denoise_mic_folder,denoise_basename) if not os.path.exists(denoise_link_dest): currtime = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") if os.path.exists(denoise_link_src): logfile.write(currtime+" = creating symbolic link "+denoise_link_src+" -> "+denoise_link_dest+"\n") os.symlink(denoise_link_src,denoise_link_dest) else: logfile.write(currtime+" = could not find source for symbolic link ("+denoise_link_src+" -> "+denoise_link_dest+")\n") else: logfile.write(currtime+" = Symbolic link already present at destination ("+denoise_link_dest+")\n") function_call = "cryolo_boxmanager.py -i "+denoise_mic_folder function_call_split = function_call.split(" ") logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": sending function call = "+function_call+"\n") subprocess.call(function_call_split) #Need to code-in some kind of pipeline.star file for information for training train_star = 
open(os.path.join(args.o,"micrographs_train_metadata.star"),'w') #The first row will define the path to "train_images" train_star.write(os.path.join(args.o,"raw_image_folder")+"\n") #The second row will define the path to the "train_boxes" folder train_star.write(os.path.join(args.o,"boxes")+"\n") train_star.close() #Star file for managing relion pipeline flow out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"micrographs_train_metadata.star")+" 1\n") out_nodes_star.close() def do_train(args,logfile): starttime = get_time() logfile.write("Beginning 'Training' task at "+starttime+"\n\n") train_paths_star = open(args.in_mics,'r') train_folders = train_paths_star.readlines() train_images = train_folders[0].replace("\n","") train_boxes = train_folders[1].replace("\n","") config_path = train_folders[2].replace("\n","") model_name = train_folders[3].replace("\n","") function_call = "cryolo_gui.py train -c "+config_path+" -nc "+args.j+" -w 5" this_time = get_time() logfile.write(this_time+" = sending function call: "+function_call+"\n") function_call_split = function_call.split(" ") subprocess.call(function_call_split) #Copy the output picking model to the folder, to prevent potential over-writing by future trainings shutil.copyfile(model_name,os.path.join(args.o,model_name)) #Pipeline STAR out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"particles_model.star")+" 3\n") out_nodes_star.close() #STAR file containing meta-data for picking trained_star = 
open(os.path.join(args.o,"particles_model.star"),'w') trained_star.write(os.path.join(args.o,model_name)+"\n") trained_star.write(config_path+"\n") trained_star.close() def do_predict(args,logfile): starttime = get_time() logfile.write("Beginning 'Predict' task at "+starttime+"\n\n") mic_ctf_star = open(args.in_mics, 'r') cryolo_star = open(args.in_parts,'r') cryolo_lines = cryolo_star.readlines() model_path = cryolo_lines[0].replace("\n","") config_path = cryolo_lines[1].replace("\n","") this_time = get_time() logfile.write(this_time+" = Using model "+model_path+" to pick particles on micrographs in "+args.in_mics+"\n") out_box_folder = os.path.join(args.o,"particles") #Find the micrograph folder from CTF path mic_ctf_star_lines = mic_ctf_star.readlines() #In case there are multiple runs joined together, we are going to look for unique CtfFind/micrographs folders, instead of just using the first we find ctf_mic_path_list = [] CTFcol = 2 #Assume .star file follows default order but still explicitly identify below, just in case for LINE in mic_ctf_star_lines: if "_rlnCtfImage" in LINE: #Have to fix weird spacing issue LINE = LINE.replace(" \n","") CTFcol = int(LINE.split(" ")[-1].replace("#","")) - 1 #Relion is 1-indexed, python is 0-indexed if "MotionCorr" in LINE: while " " in LINE: #since we are splitting by " ", we need to make sure there aren't any multi-" " left LINE = LINE.replace(" "," ") split_line = LINE.split(" ") CTF_full_path = split_line[CTFcol].replace(":mrc","") #Remove the weird nomenclature from .star file CTF_mic_folder= os.path.split(CTF_full_path)[0] ctf_mic_path_list.append(CTF_mic_folder) #Pull only the unique CTF/micrographs folders unique_paths = np.unique(np.array(ctf_mic_path_list)) this_time = get_time() logfile.write(this_time+" = Identified the following micrograph path(s): "+str(unique_paths)+"\n") #We need to make symbolic links to a unified folder mic_folder = os.path.join(args.o,"micrographs") if not os.path.exists(mic_folder): 
os.mkdir(mic_folder) for mic_path in unique_paths: full_path = os.path.join(os.getcwd(),mic_path) mic_list = glob.glob(os.path.join(full_path,"*.mrc")) for MIC in mic_list: link_path = os.path.join(mic_folder,os.path.basename(MIC)) if os.path.exists(link_path): os.unlink(link_path) os.symlink(MIC,link_path) #Make a folder for storing particle picks out_box_folder = os.path.join(args.o,"boxes") if not os.path.exists(out_box_folder): os.mkdir(out_box_folder) function_call = "cryolo_gui.py predict -c "+config_path+" -w "+model_path+" -i "+mic_folder+" -o "+out_box_folder+" -t "+str(args.threshold)+" -d "+str(args.distance)+" -nc "+args.j this_time = get_time() logfile.write(this_time+" = sending function call: "+function_call+"\n") function_call_split = function_call.split(" ") subprocess.call(function_call_split) #Pipeline STAR out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"coords_suffix_cryolo.star")+" 2\n") out_nodes_star.close() #Relion requires you to define coordinate suffixes for filenames (in this case "cryolo") suffix_star = open(os.path.join(args.o,"coords_suffix_cryolo.star"),'w') suffix_star.write(args.in_mics+"\n") suffix_star.close() #Take the .star files from cryolo and put them in relion-type arrangement star_list = glob.glob(os.path.join(args.o,"boxes/STAR/*.star")) for STAR in star_list: full_star_path = os.path.join(os.getcwd(),STAR) link_path = os.path.join(args.o,"micrographs/"+os.path.basename(STAR)).replace(".star","_cryolo.star") link_path.replace(".star","_cryolo.star") # have to do the swap defined by suffix above if os.path.exists(link_path): os.unlink(link_path) os.symlink(full_star_path,link_path) def do_config_setup(args,logfile): starttime = get_time() logfile.write("Beginning 'Config Setup' task 
at "+starttime+"\n\n") this_time = get_time() logfile.write(this_time+" = Getting training micrographs and boxes from "+args.in_mics+"\n") train_paths_star = open(args.in_mics,'r') train_folders = train_paths_star.readlines() train_images = train_folders[0].replace("\n","") train_boxes = train_folders[1].replace("\n","") this_time = get_time() logfile.write(this_time+" = Train images ("+train_images+") and training boxes ("+train_boxes+") folders identified.\n") this_time = get_time() logfile.write(this_time+" = Getting box size from training boxes .box files\n") train_box_files = glob.glob(os.path.join(train_boxes,"*.box")) x_coord,y_coord,xbox,ybox = np.genfromtxt(train_box_files[0],dtype=int,unpack=True) boxsize = np.copy(np.unique(xbox)[0]) config_path = os.path.join(args.o,"config_cryolo.json") function_call = "cryolo_gui.py config --train_image_folder "+train_images+" --train_annot_folder "+train_boxes+" --saved_weights_name "+args.p_model+" --filter JANNI --janni_model "+args.n_model+" --log_path "+os.path.join(args.o,"cryolo_log.log "+config_path+" "+str(boxsize)) this_time = get_time() logfile.write(this_time+" = Sending function call: "+function_call+"\n") function_call_split = function_call.split(" ") subprocess.call(function_call_split) #Copy the manually-picked training details to this job, which will then feed to the actual train job box_src = os.path.join(os.getcwd(),train_boxes) box_dest= os.path.join(args.o,"train_boxes") if os.path.exists(box_dest): os.unlink(box_dest) os.symlink(box_src,box_dest) mic_src = os.path.join(os.getcwd(),train_images) mic_dest= os.path.join(args.o,"train_images") if os.path.exists(mic_dest): os.unlink(mic_dest) os.symlink(mic_src,mic_dest) #Put the metadata in a dummy star file, like before config_star_path = os.path.join(args.o,"micrographs_config.star") config_star = open(config_star_path,'w') config_star.write(os.path.join(args.o,"train_images")+"\n") config_star.write(os.path.join(args.o,"train_boxes")+"\n") 
config_star.write(config_path+"\n") config_star.write(args.p_model+"\n") config_star.close() #Set up the relion pipeline info out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"micrographs_config.star")+" 1\n") out_nodes_star.close() #Symlink the config_cryolo.json file to the main project directory, so that the auto-pipeline can see that it doesn't need to repeat this step again config_basename = os.path.basename(config_path) if os.path.exists(config_basename): os.unlink(config_basename) os.symlink(os.path.join(os.getcwd(),config_path),config_basename) def get_time(): this_time = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") return this_time
50.509537
281
0.68366
import os,sys,glob,datetime,itertools,subprocess,shutil import numpy as np def do_denoise(args,logfile): starttime = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") logfile.write("Beginning 'Denoise' task at "+starttime+"\n\n") mics_star_path = args.in_mics mics_star_file = open(mics_star_path,'r') star_lines = mics_star_file.readlines() mics_star_file.close() mic_file_list = [] star_line_list= [] denoised_out_star = open(os.path.join(args.o,"micrographs_denoised.star"),'w') denoised_star_header_complete = False for line in star_lines: first_column = line.split(" ")[0] if "MotionCorr" in first_column: mic_file_list.append(first_column) if not denoised_star_header_complete: denoised_out_star.write("_rlnDenoisedMicrograph #"+str(rln_idx+1)+"\n") denoised_star_header_complete = True #denoised_out_star.write(line[:-1]+"\t") star_line_list.append(line[:-1]+" ") else: denoised_out_star.write(line) if "_rln" in line: #Need to keep track of how many _rlnColumns we have rln_idx = int(line.split(" ")[-2].replace("#","")) #The "-1" idx is the new line character, so the "-2" idx will be "#[rln_idx]" string #Randomly pick args.nmic number of files to denoise etc. 
mic_idx = np.arange(len(mic_file_list)) denoise_idxs = np.random.choice(mic_idx,size=args.nmic,replace=False) denoise_list = np.array(mic_file_list)[denoise_idxs] denoise_star_lines = np.array(star_line_list)[denoise_idxs] for idx in range(args.nmic): mic_basename = os.path.basename(denoise_list[idx]) denoised_mic = os.path.join(args.o,"denoised/for_picking/"+mic_basename) denoised_out_star.write(denoise_star_lines[idx]+denoised_mic+"\n") denoised_out_star.close() #Create a folder to house the mic list for denoising/manual picking for_pick_folder = os.path.join(args.o,"for_picking") if not os.path.isdir(for_pick_folder): logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": creating folder = "+for_pick_folder+"\n") os.mkdir(for_pick_folder) #If the folder does exist, get rid of old symlinks else: for FILE in glob.glob(os.path.join(for_pick_folder,"*.*")): os.unlink(FILE) #This folder will hold the post-denoising mics after_denoise_folder = os.path.join(args.o,"denoised") if not os.path.isdir(after_denoise_folder): logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": creating folder = "+after_denoise_folder+"\n") os.mkdir(after_denoise_folder) #put symbolic links to the randomly-selected micrographs in to the "for_picking" folder for FILE in denoise_list: #Call os.path.basename to remove the prefix path (maintained by glob.glob call) link_path = os.path.join(for_pick_folder,os.path.basename(FILE)) logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": creating symbolic link for micrograph: "+FILE+" -> "+link_path) os.symlink(os.path.join(os.getcwd(),FILE),link_path) #Now that the preparation work has been done, start denoising micrographs function_call = "janni_denoise.py denoise -ol 24 -bs 4 -g 0 "+for_pick_folder+" " +after_denoise_folder+" "+args.n_model logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": Sending function call = "+function_call+"\n") #Need to split the program and inputs in 
subprocess call (needs to be list, not string) function_call_split = function_call.split(" ") subprocess.call(function_call_split) #Need to make pipeline .star file information out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"micrographs_denoised.star")+"\t1\n") out_nodes_star.close() def do_manual_pick(args,logfile): starttime = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") logfile.write("Beginning 'Manual Pick' task at "+starttime+"\n\n") logfile.write("Opening input micrographs star file: "+args.in_mics+"\n") denoised_star = open(args.in_mics,'r') denoised_star_lines = denoised_star.readlines() denoised_star.close() #Generate a folder within the job to link the mics for picking denoise_mic_folder = os.path.join(args.o,"denoised_folder") raw_mic_folder= os.path.join(args.o,"raw_image_folder") manual_pick_folder = os.path.join(args.o,"boxes") #If folders don't exist, then make them logfile.write("Generating folders for symbolic links and particle picks: %s; %s; %s\n" % (denoise_mic_folder, raw_mic_folder, manual_pick_folder)) if not os.path.isdir(denoise_mic_folder): os.mkdir(denoise_mic_folder) if not os.path.isdir(raw_mic_folder): os.mkdir(raw_mic_folder) if not os.path.isdir(manual_pick_folder): os.mkdir(manual_pick_folder) logfile.write("Determining raw and denoised micrographs for manual picking from "+args.in_mics+"\n") for LINE in denoised_star_lines: LINE = LINE.replace("\n","").replace("\t","") if "MotionCorr" in LINE: split_line = LINE.split(" ") link_path_prefix = os.getcwd() raw_rel_path = split_line[0] raw_basename = os.path.basename(raw_rel_path) raw_link_src = os.path.join(link_path_prefix,raw_rel_path) raw_link_dest= os.path.join(raw_mic_folder,raw_basename) currtime= 
datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") if not os.path.exists(raw_link_dest): if os.path.exists(raw_link_src): logfile.write(currtime+" = creating symbolic link "+raw_link_src+" -> "+raw_link_dest+"\n") os.symlink(raw_link_src,raw_link_dest) else: logfile.write(currtime+" = could not find source for symbolic link ("+raw_link_src+" -> "+raw_link_dest+")\n") else: logfile.write(currtime+" = Symbolic link already present at destination ("+raw_link_dest+")\n") denoise_rel_path = split_line[-1] denoise_basename = os.path.basename(denoise_rel_path) denoise_link_src = os.path.join(link_path_prefix,denoise_rel_path) denoise_link_dest= os.path.join(denoise_mic_folder,denoise_basename) if not os.path.exists(denoise_link_dest): currtime = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") if os.path.exists(denoise_link_src): logfile.write(currtime+" = creating symbolic link "+denoise_link_src+" -> "+denoise_link_dest+"\n") os.symlink(denoise_link_src,denoise_link_dest) else: logfile.write(currtime+" = could not find source for symbolic link ("+denoise_link_src+" -> "+denoise_link_dest+")\n") else: logfile.write(currtime+" = Symbolic link already present at destination ("+denoise_link_dest+")\n") function_call = "cryolo_boxmanager.py -i "+denoise_mic_folder function_call_split = function_call.split(" ") logfile.write(datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")+": sending function call = "+function_call+"\n") subprocess.call(function_call_split) #Need to code-in some kind of pipeline.star file for information for training train_star = open(os.path.join(args.o,"micrographs_train_metadata.star"),'w') #The first row will define the path to "train_images" train_star.write(os.path.join(args.o,"raw_image_folder")+"\n") #The second row will define the path to the "train_boxes" folder train_star.write(os.path.join(args.o,"boxes")+"\n") train_star.close() #Star file for managing relion pipeline flow out_nodes_star = 
open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"micrographs_train_metadata.star")+" 1\n") out_nodes_star.close() def do_train(args,logfile): starttime = get_time() logfile.write("Beginning 'Training' task at "+starttime+"\n\n") train_paths_star = open(args.in_mics,'r') train_folders = train_paths_star.readlines() train_images = train_folders[0].replace("\n","") train_boxes = train_folders[1].replace("\n","") config_path = train_folders[2].replace("\n","") model_name = train_folders[3].replace("\n","") function_call = "cryolo_gui.py train -c "+config_path+" -nc "+args.j+" -w 5" this_time = get_time() logfile.write(this_time+" = sending function call: "+function_call+"\n") function_call_split = function_call.split(" ") subprocess.call(function_call_split) #Copy the output picking model to the folder, to prevent potential over-writing by future trainings shutil.copyfile(model_name,os.path.join(args.o,model_name)) #Pipeline STAR out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"particles_model.star")+" 3\n") out_nodes_star.close() #STAR file containing meta-data for picking trained_star = open(os.path.join(args.o,"particles_model.star"),'w') trained_star.write(os.path.join(args.o,model_name)+"\n") trained_star.write(config_path+"\n") trained_star.close() def do_predict(args,logfile): starttime = get_time() logfile.write("Beginning 'Predict' task at "+starttime+"\n\n") mic_ctf_star = open(args.in_mics, 'r') cryolo_star = open(args.in_parts,'r') cryolo_lines = cryolo_star.readlines() model_path = 
cryolo_lines[0].replace("\n","") config_path = cryolo_lines[1].replace("\n","") this_time = get_time() logfile.write(this_time+" = Using model "+model_path+" to pick particles on micrographs in "+args.in_mics+"\n") out_box_folder = os.path.join(args.o,"particles") #Find the micrograph folder from CTF path mic_ctf_star_lines = mic_ctf_star.readlines() #In case there are multiple runs joined together, we are going to look for unique CtfFind/micrographs folders, instead of just using the first we find ctf_mic_path_list = [] CTFcol = 2 #Assume .star file follows default order but still explicitly identify below, just in case for LINE in mic_ctf_star_lines: if "_rlnCtfImage" in LINE: #Have to fix weird spacing issue LINE = LINE.replace(" \n","") CTFcol = int(LINE.split(" ")[-1].replace("#","")) - 1 #Relion is 1-indexed, python is 0-indexed if "MotionCorr" in LINE: while " " in LINE: #since we are splitting by " ", we need to make sure there aren't any multi-" " left LINE = LINE.replace(" "," ") split_line = LINE.split(" ") CTF_full_path = split_line[CTFcol].replace(":mrc","") CTF_mic_folder= os.path.split(CTF_full_path)[0] ctf_mic_path_list.append(CTF_mic_folder) unique_paths = np.unique(np.array(ctf_mic_path_list)) this_time = get_time() logfile.write(this_time+" = Identified the following micrograph path(s): "+str(unique_paths)+"\n") mic_folder = os.path.join(args.o,"micrographs") if not os.path.exists(mic_folder): os.mkdir(mic_folder) for mic_path in unique_paths: full_path = os.path.join(os.getcwd(),mic_path) mic_list = glob.glob(os.path.join(full_path,"*.mrc")) for MIC in mic_list: link_path = os.path.join(mic_folder,os.path.basename(MIC)) if os.path.exists(link_path): os.unlink(link_path) os.symlink(MIC,link_path) out_box_folder = os.path.join(args.o,"boxes") if not os.path.exists(out_box_folder): os.mkdir(out_box_folder) function_call = "cryolo_gui.py predict -c "+config_path+" -w "+model_path+" -i "+mic_folder+" -o "+out_box_folder+" -t "+str(args.threshold)+" 
-d "+str(args.distance)+" -nc "+args.j this_time = get_time() logfile.write(this_time+" = sending function call: "+function_call+"\n") function_call_split = function_call.split(" ") subprocess.call(function_call_split) out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"coords_suffix_cryolo.star")+" 2\n") out_nodes_star.close() suffix_star = open(os.path.join(args.o,"coords_suffix_cryolo.star"),'w') suffix_star.write(args.in_mics+"\n") suffix_star.close() star_list = glob.glob(os.path.join(args.o,"boxes/STAR/*.star")) for STAR in star_list: full_star_path = os.path.join(os.getcwd(),STAR) link_path = os.path.join(args.o,"micrographs/"+os.path.basename(STAR)).replace(".star","_cryolo.star") link_path.replace(".star","_cryolo.star") if os.path.exists(link_path): os.unlink(link_path) os.symlink(full_star_path,link_path) def do_config_setup(args,logfile): starttime = get_time() logfile.write("Beginning 'Config Setup' task at "+starttime+"\n\n") this_time = get_time() logfile.write(this_time+" = Getting training micrographs and boxes from "+args.in_mics+"\n") train_paths_star = open(args.in_mics,'r') train_folders = train_paths_star.readlines() train_images = train_folders[0].replace("\n","") train_boxes = train_folders[1].replace("\n","") this_time = get_time() logfile.write(this_time+" = Train images ("+train_images+") and training boxes ("+train_boxes+") folders identified.\n") this_time = get_time() logfile.write(this_time+" = Getting box size from training boxes .box files\n") train_box_files = glob.glob(os.path.join(train_boxes,"*.box")) x_coord,y_coord,xbox,ybox = np.genfromtxt(train_box_files[0],dtype=int,unpack=True) boxsize = np.copy(np.unique(xbox)[0]) config_path = os.path.join(args.o,"config_cryolo.json") function_call = 
"cryolo_gui.py config --train_image_folder "+train_images+" --train_annot_folder "+train_boxes+" --saved_weights_name "+args.p_model+" --filter JANNI --janni_model "+args.n_model+" --log_path "+os.path.join(args.o,"cryolo_log.log "+config_path+" "+str(boxsize)) this_time = get_time() logfile.write(this_time+" = Sending function call: "+function_call+"\n") function_call_split = function_call.split(" ") subprocess.call(function_call_split) box_src = os.path.join(os.getcwd(),train_boxes) box_dest= os.path.join(args.o,"train_boxes") if os.path.exists(box_dest): os.unlink(box_dest) os.symlink(box_src,box_dest) mic_src = os.path.join(os.getcwd(),train_images) mic_dest= os.path.join(args.o,"train_images") if os.path.exists(mic_dest): os.unlink(mic_dest) os.symlink(mic_src,mic_dest) config_star_path = os.path.join(args.o,"micrographs_config.star") config_star = open(config_star_path,'w') config_star.write(os.path.join(args.o,"train_images")+"\n") config_star.write(os.path.join(args.o,"train_boxes")+"\n") config_star.write(config_path+"\n") config_star.write(args.p_model+"\n") config_star.close() out_nodes_star = open(os.path.join(args.o,"RELION_OUTPUT_NODES.star"),'w') out_nodes_star.write("data_output_nodes\n") out_nodes_star.write("loop_\n") out_nodes_star.write("_rlnPipeLineNodeName #1\n") out_nodes_star.write("_rlnPipeLineNodeType #2\n") out_nodes_star.write(os.path.join(args.o,"micrographs_config.star")+" 1\n") out_nodes_star.close() config_basename = os.path.basename(config_path) if os.path.exists(config_basename): os.unlink(config_basename) os.symlink(os.path.join(os.getcwd(),config_path),config_basename) def get_time(): this_time = datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S") return this_time
true
true
1c354661a0b5ab8bc181ef7729f978760c4b1d9e
192
py
Python
tests/test_docser.py
chrisbrake/docser
3421632f741f075c533db02757b5a778350589f9
[ "BSD-3-Clause" ]
null
null
null
tests/test_docser.py
chrisbrake/docser
3421632f741f075c533db02757b5a778350589f9
[ "BSD-3-Clause" ]
null
null
null
tests/test_docser.py
chrisbrake/docser
3421632f741f075c533db02757b5a778350589f9
[ "BSD-3-Clause" ]
null
null
null
import pytest @pytest.fixture() def docser(): """ Get docser for testing """ import docser return docser def test_import(docser): """ Test importable """ assert docser
13.714286
34
0.640625
import pytest @pytest.fixture() def docser(): import docser return docser def test_import(docser): assert docser
true
true
1c3546cd36759c7190de9c4a0c3e8fc967d1834f
3,864
py
Python
qiskit/extensions/quantum_initializer/ucy.py
tareqdandachi/qiskit-terra
5221fe330adba5529bfa22dc25262ac8e6291aaf
[ "Apache-2.0" ]
3
2019-05-19T17:39:38.000Z
2020-01-28T19:59:18.000Z
qiskit/extensions/quantum_initializer/ucy.py
tareqdandachi/qiskit-terra
5221fe330adba5529bfa22dc25262ac8e6291aaf
[ "Apache-2.0" ]
4
2019-05-13T15:28:46.000Z
2019-12-19T20:47:02.000Z
qiskit/extensions/quantum_initializer/ucy.py
tareqdandachi/qiskit-terra
5221fe330adba5529bfa22dc25262ac8e6291aaf
[ "Apache-2.0" ]
1
2021-07-07T16:55:41.000Z
2021-07-07T16:55:41.000Z
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Implementation of the abstract class UCRot for uniformly controlled (also called multiplexed) single-qubit rotations around the Y-axes (i.e., uniformly controlled R_y rotations). These gates can have several control qubits and a single target qubit. If the k control qubits are in the state ket(i) (in the computational bases), a single-qubit rotation R_y(a_i) is applied to the target qubit. """ import math from qiskit import QuantumRegister, QiskitError from qiskit.circuit.quantumcircuit import QuantumCircuit from qiskit.extensions.quantum_initializer.ucrot import UCRot class UCY(UCRot): """ Uniformly controlled rotations (also called multiplexed rotations). The decomposition is based on 'Synthesis of Quantum Logic Circuits' by V. Shende et al. (https://arxiv.org/pdf/quant-ph/0406176.pdf) Input: angle_list = list of (real) rotation angles [a_0,...,a_{2^k-1}] """ def __init__(self, angle_list): super().__init__(angle_list, "Y") def ucy(self, angle_list, q_controls, q_target): """Attach a uniformly controlled (also called multiplexed) Ry rotation gate to a circuit. The decomposition is base on https://arxiv.org/pdf/quant-ph/0406176.pdf by Shende et al. Args: angle_list (list[numbers): list of (real) rotation angles [a_0,...,a_{2^k-1}] q_controls (QuantumRegister|list[Qubit]): list of k control qubits (or empty list if no controls). 
The control qubits are ordered according to their significance in increasing order: For example if q_controls=[q[1],q[2]] (with q = QuantumRegister(2)), the rotation Ry(a_0)is performed if q[1] and q[2] are in the state zero, the rotation Ry(a_1) is performed if q[1] is in the state one and q[2] is in the state zero, and so on q_target (QuantumRegister|Qubit): target qubit, where we act on with the single-qubit rotation gates Returns: QuantumCircuit: the uniformly controlled rotation gate is attached to the circuit. Raises: QiskitError: if the list number of control qubits does not correspond to the provided number of single-qubit unitaries; if an input is of the wrong type """ if isinstance(q_controls, QuantumRegister): q_controls = q_controls[:] if isinstance(q_target, QuantumRegister): q_target = q_target[:] if len(q_target) == 1: q_target = q_target[0] else: raise QiskitError("The target qubit is a QuantumRegister containing" " more than one qubits.") # Check if q_controls has type "list" if not isinstance(angle_list, list): raise QiskitError("The angles must be provided as a list.") num_contr = math.log2(len(angle_list)) if num_contr < 0 or not num_contr.is_integer(): raise QiskitError("The number of controlled rotation gates is not" " a non-negative power of 2.") # Check if number of control qubits does correspond to the number of rotations if num_contr != len(q_controls): raise QiskitError("Number of controlled rotations does not correspond to" " the number of control-qubits.") return self.append(UCY(angle_list), [q_target] + q_controls, []) QuantumCircuit.ucy = ucy
41.548387
93
0.689182
import math from qiskit import QuantumRegister, QiskitError from qiskit.circuit.quantumcircuit import QuantumCircuit from qiskit.extensions.quantum_initializer.ucrot import UCRot class UCY(UCRot): def __init__(self, angle_list): super().__init__(angle_list, "Y") def ucy(self, angle_list, q_controls, q_target): if isinstance(q_controls, QuantumRegister): q_controls = q_controls[:] if isinstance(q_target, QuantumRegister): q_target = q_target[:] if len(q_target) == 1: q_target = q_target[0] else: raise QiskitError("The target qubit is a QuantumRegister containing" " more than one qubits.") if not isinstance(angle_list, list): raise QiskitError("The angles must be provided as a list.") num_contr = math.log2(len(angle_list)) if num_contr < 0 or not num_contr.is_integer(): raise QiskitError("The number of controlled rotation gates is not" " a non-negative power of 2.") if num_contr != len(q_controls): raise QiskitError("Number of controlled rotations does not correspond to" " the number of control-qubits.") return self.append(UCY(angle_list), [q_target] + q_controls, []) QuantumCircuit.ucy = ucy
true
true
1c3548998c612213f500cd3a9cd72c539e5ad7f3
358
py
Python
smafer/model/base.py
IvanStrazov/smafer
cb7d9d61825aafc77ca05dc19e5fecb7d1632d16
[ "MIT" ]
null
null
null
smafer/model/base.py
IvanStrazov/smafer
cb7d9d61825aafc77ca05dc19e5fecb7d1632d16
[ "MIT" ]
null
null
null
smafer/model/base.py
IvanStrazov/smafer
cb7d9d61825aafc77ca05dc19e5fecb7d1632d16
[ "MIT" ]
null
null
null
# utf-8 # Python 3.9 # 2021-05-03 from abc import abstractmethod, ABC import pandas as pd class __BaseModel(ABC): @abstractmethod def __init__(self, estimator, *args, **kwargs) -> None: pass @abstractmethod def fit(self, *args, **kwargs) -> pd.Series: pass @abstractmethod def predict(self, *args, **kwargs) -> pd.Series: pass
17.9
64
0.662011
from abc import abstractmethod, ABC import pandas as pd class __BaseModel(ABC): @abstractmethod def __init__(self, estimator, *args, **kwargs) -> None: pass @abstractmethod def fit(self, *args, **kwargs) -> pd.Series: pass @abstractmethod def predict(self, *args, **kwargs) -> pd.Series: pass
true
true
1c354959b30f4bf8c734c6341435f0425bcbf8a1
1,024
py
Python
app/app/urls.py
osamada/recipe-app-api
9246e955392bd648539f20619b3039ea3d24b6d5
[ "MIT" ]
null
null
null
app/app/urls.py
osamada/recipe-app-api
9246e955392bd648539f20619b3039ea3d24b6d5
[ "MIT" ]
null
null
null
app/app/urls.py
osamada/recipe-app-api
9246e955392bd648539f20619b3039ea3d24b6d5
[ "MIT" ]
null
null
null
"""app URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from django.conf.urls.static import static from django.conf import settings urlpatterns = [ path('admin/', admin.site.urls), path('api/user/', include('user.urls')), path('api/recipe/', include('recipe.urls')), path('', include('home.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
37.925926
77
0.709961
from django.contrib import admin from django.urls import path, include from django.conf.urls.static import static from django.conf import settings urlpatterns = [ path('admin/', admin.site.urls), path('api/user/', include('user.urls')), path('api/recipe/', include('recipe.urls')), path('', include('home.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
true
true
1c354a615dd5e8945e46ebed1cc944d5cf077255
18,002
py
Python
supports/pyload/src/pyload/plugins/base/simple_downloader.py
LuckyNicky/pycrawler
4b3fe2f6e8e51f236d95a64a89a44199e4e97743
[ "Apache-2.0" ]
1
2020-04-02T17:03:39.000Z
2020-04-02T17:03:39.000Z
supports/pyload/src/pyload/plugins/base/simple_downloader.py
LuckyNicky/pycrawler
4b3fe2f6e8e51f236d95a64a89a44199e4e97743
[ "Apache-2.0" ]
null
null
null
supports/pyload/src/pyload/plugins/base/simple_downloader.py
LuckyNicky/pycrawler
4b3fe2f6e8e51f236d95a64a89a44199e4e97743
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import os import re from ...core.network.http.exceptions import BadHeader from ...core.network.request_factory import get_url from ...core.utils import parse from ...core.utils.old import parse_name from ..helpers import replace_patterns from .downloader import BaseDownloader class SimpleDownloader(BaseDownloader): __name__ = "SimpleDownloader" __type__ = "downloader" __version__ = "2.27" __status__ = "stable" __pyload_version__ = "0.5" __pattern__ = r"^unmatchable$" __config__ = [ ("enabled", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ("fallback", "bool", "Fallback to free download if premium fails", True), ("chk_filesize", "bool", "Check file size", True), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10), ] __description__ = """Simple downloader plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] """ Info patterns: INFO_PATTERN: (mandatory) Name and Size of the file example: INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>size_unit)' or NAME_PATTERN: (mandatory) Name that will be set for the file example: NAME_PATTERN = r'(?P<N>file_name)' SIZE_PATTERN: (mandatory) Size that will be checked for the file example: SIZE_PATTERN = r'(?P<S>file_size) (?P<U>size_unit)' HASHSUM_PATTERN: (optional) Hash code and type of the file example: HASHSUM_PATTERN = r'(?P<D>hash_digest) (?P<H>MD5)' OFFLINE_PATTERN: (mandatory) Check if the page is unreachable example: OFFLINE_PATTERN = r'File (deleted|not found)' TEMP_OFFLINE_PATTERN: (optional) Check if the page is temporarily unreachable example: TEMP_OFFLINE_PATTERN = r'Server (maintenance|maintainance)' Error patterns: WAIT_PATTERN: (optional) Detect waiting time example: WAIT_PATTERN = r'' PREMIUM_ONLY_PATTERN: (optional) Check if the file can be downloaded only with a premium account example: PREMIUM_ONLY_PATTERN = r'Premium account required' HAPPY_HOUR_PATTERN: (optional) 
example: HAPPY_HOUR_PATTERN = r'Happy hour' IP_BLOCKED_PATTERN: (optional) example: IP_BLOCKED_PATTERN = r'in your country' DL_LIMIT_PATTERN: (optional) example: DL_LIMIT_PATTERN = r'download limit' SIZE_LIMIT_PATTERN: (optional) example: SIZE_LIMIT_PATTERN = r'up to' ERROR_PATTERN: (optional) Detect any error preventing download example: ERROR_PATTERN = r'' Instead overriding handle_free and handle_premium methods you may define the following patterns for basic link handling: LINK_PATTERN: (optional) group(1) should be the direct link for free and premium download example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"' or LINK_FREE_PATTERN: (optional) group(1) should be the direct link for free download example: LINK_FREE_PATTERN = r'<div class="link"><a href="(.+?)"' LINK_PREMIUM_PATTERN: (optional) group(1) should be the direct link for premium download example: LINK_PREMIUM_PATTERN = r'<div class="link"><a href="(.+?)"' """ NAME_REPLACEMENTS = [] SIZE_REPLACEMENTS = [] URL_REPLACEMENTS = [] #: Set to False to not check the last downloaded file with declared error patterns CHECK_FILE = True CHECK_TRAFFIC = ( False ) #: Set to True to reload checking traffic left for premium account COOKIES = True #: or False or list of tuples [(domain, name, value)] #: Set to True to looking for direct link (as defined in handle_direct method), set to None to do it if self.account is True else False DIRECT_LINK = True #: Set to True to use any content-disposition value found in http header as file name DISPOSITION = True LOGIN_ACCOUNT = False #: Set to True to require account login LOGIN_PREMIUM = False #: Set to True to require premium account login #: Set to True to leech other hoster link (as defined in handle_multi method) LEECH_HOSTER = False #: Set to encoding name if encoding value in http header is not correct TEXT_ENCODING = True # TRANSLATE_ERROR = True LINK_PATTERN = None LINK_FREE_PATTERN = None LINK_PREMIUM_PATTERN = None INFO_PATTERN = None NAME_PATTERN = None 
SIZE_PATTERN = None HASHSUM_PATTERN = r"[^\w](?P<H>(CRC|crc)(-?32)?|(MD|md)-?5|(SHA|sha)-?(1|224|256|384|512)).*(:|=|>)[ ]*(?P<D>(?:[a-z0-9]|[A-Z0-9]){8,})" OFFLINE_PATTERN = r"[^\w](404\s|[Ii]nvalid|[Oo]ffline|[Dd]elet|[Rr]emov|([Nn]o(t|thing)?|sn\'t) (found|(longer )?(available|exist)))" TEMP_OFFLINE_PATTERN = ( r"[^\w](503\s|[Mm]aint(e|ai)nance|[Tt]emp([.-]|orarily)|[Mm]irror)" ) WAIT_PATTERN = None PREMIUM_ONLY_PATTERN = None HAPPY_HOUR_PATTERN = None IP_BLOCKED_PATTERN = None DL_LIMIT_PATTERN = None SIZE_LIMIT_PATTERN = None ERROR_PATTERN = None FILE_ERRORS = [ ( "Html error", r"\A(?:\s*<.+>)?((?:[\w\s]*(?:[Ee]rror|ERROR)\s*\:?)?\s*\d{3})(?:\Z|\s+)", ), ("Request error", r"([Aa]n error occured while processing your request)"), ("Html file", r"\A\s*<!DOCTYPE html"), ] @classmethod def api_info(cls, url): return {} @classmethod def get_info(cls, url="", html=""): info = super(SimpleDownloader, cls).get_info(url) info.update(cls.api_info(url)) if not html and info["status"] != 2: if not url: info["error"] = "missing url" info["status"] = 1 elif info["status"] in (3, 7): try: html = get_url(url, cookies=cls.COOKIES, decode=cls.TEXT_ENCODING) except BadHeader as exc: info["error"] = "{}: {}".format(exc.code, exc.content) except Exception: pass if html: if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN, html) is not None: info["status"] = 1 elif ( cls.TEMP_OFFLINE_PATTERN and re.search(cls.TEMP_OFFLINE_PATTERN, html) is not None ): info["status"] = 6 else: for pattern in ( "INFO_PATTERN", "NAME_PATTERN", "SIZE_PATTERN", "HASHSUM_PATTERN", ): try: attr = getattr(cls, pattern) pdict = re.search(attr, html).groupdict() if all(True for k in pdict if k not in info["pattern"]): info["pattern"].update(pdict) except Exception: continue else: info["status"] = 2 if "N" in info["pattern"]: name = replace_patterns(info["pattern"]["N"], cls.NAME_REPLACEMENTS) info["name"] = parse_name(name) if "S" in info["pattern"]: size = replace_patterns( info["pattern"]["S"] + 
info["pattern"]["U"] if "U" in info["pattern"] else info["pattern"]["S"], cls.SIZE_REPLACEMENTS, ) info["size"] = parse.bytesize(size) elif isinstance(info["size"], str): unit = info["units"] if "units" in info else "" info["size"] = parse.bytesize(info["size"], unit) if "H" in info["pattern"]: hash_type = info["pattern"]["H"].strip("-").upper() info["hash"][hash_type] = info["pattern"]["D"] return info def setup(self): self.multi_dl = self.premium self.resume_download = self.premium def _prepare(self): self.link = "" self.direct_dl = False self.leech_dl = False if self.LOGIN_PREMIUM: self.no_fallback = True if not self.premium: self.fail(self._("Required premium account not found")) if self.LOGIN_ACCOUNT and not self.account: self.fail(self._("Required account not found")) self.req.set_option("timeout", 120) if self.LINK_PATTERN: if self.LINK_FREE_PATTERN is None: self.LINK_FREE_PATTERN = self.LINK_PATTERN if self.LINK_PREMIUM_PATTERN is None: self.LINK_PREMIUM_PATTERN = self.LINK_PATTERN if self.LEECH_HOSTER: pattern = self.pyload.plugin_manager.hoster_plugins.get(self.classname)[ "pattern" ] if ( self.__pattern__ != pattern and re.match(self.__pattern__, self.pyfile.url) is None ): self.leech_dl = True if self.leech_dl: self.direct_dl = False elif self.DIRECT_LINK is None: self.direct_dl = bool(self.premium) else: self.direct_dl = self.DIRECT_LINK if not self.leech_dl: self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS) def _preload(self): if self.data: return self.data = self.load( self.pyfile.url, cookies=self.COOKIES, ref=False, decode=self.TEXT_ENCODING ) def process(self, pyfile): self._prepare() # TODO: Remove `handle_multi`, use MultiDownloader instead if self.leech_dl: self.log_info(self._("Processing as debrid download...")) self.handle_multi(pyfile) else: if not self.link and self.direct_dl: self.log_info(self._("Looking for direct download link...")) self.handle_direct(pyfile) if self.link: self.log_info(self._("Direct download 
link detected")) else: self.log_info(self._("Direct download link not found")) if not self.link: self._preload() self.check_errors() if self.info.get("status", 7) != 2: self.grab_info() self.check_status() self.check_duplicates() if self.premium and ( not self.CHECK_TRAFFIC or not self.out_of_traffic() ): self.log_info(self._("Processing as premium download...")) self.handle_premium(pyfile) elif not self.LOGIN_ACCOUNT or ( not self.CHECK_TRAFFIC or not self.out_of_traffic() ): self.log_info(self._("Processing as free download...")) self.handle_free(pyfile) if self.link and not self.last_download: self.log_info(self._("Downloading file...")) self.download(self.link, disposition=self.DISPOSITION) def _check_download(self): super()._check_download() self.check_download() def check_download(self): self.log_info(self._("Checking file (with built-in rules)...")) for r, p in self.FILE_ERRORS: errmsg = self.scan_download({r: re.compile(p)}) if errmsg is not None: errmsg = errmsg.strip().capitalize() try: errmsg += " | " + self.last_check.group(1).strip() except Exception: pass self.log_warning( self._("Check result: ") + errmsg, self._("Waiting 1 minute and retry"), ) self.wait(60, reconnect=True) self.restart(errmsg) else: if self.CHECK_FILE: self.log_info(self._("Checking file (with custom rules)...")) with open(os.fsdecode(self.last_download), mode="rb") as fp: self.data = fp.read(1_048_576) # TODO: Recheck in 0.6.x self.check_errors() self.log_info(self._("No errors found")) def check_errors(self): self.log_info(self._("Checking for link errors...")) if not self.data: self.log_warning(self._("No data to check")) return if self.IP_BLOCKED_PATTERN and re.search(self.IP_BLOCKED_PATTERN, self.data): self.fail(self._("Connection from your current IP address is not allowed")) elif not self.premium: if self.PREMIUM_ONLY_PATTERN and re.search( self.PREMIUM_ONLY_PATTERN, self.data ): self.fail(self._("File can be downloaded by premium users only")) elif self.SIZE_LIMIT_PATTERN 
and re.search( self.SIZE_LIMIT_PATTERN, self.data ): self.fail(self._("File too large for free download")) elif self.DL_LIMIT_PATTERN and re.search(self.DL_LIMIT_PATTERN, self.data): m = re.search(self.DL_LIMIT_PATTERN, self.data) try: errmsg = m.group(1) except (AttributeError, IndexError): errmsg = m.group(0) finally: errmsg = re.sub(r"<.*?>", " ", errmsg.strip()) self.info["error"] = errmsg self.log_warning(errmsg) wait_time = parse.seconds(errmsg) self.wait( wait_time, reconnect=wait_time > self.config.get("max_wait", 10) * 60, ) self.restart(self._("Download limit exceeded")) if self.HAPPY_HOUR_PATTERN and re.search(self.HAPPY_HOUR_PATTERN, self.data): self.multi_dl = True if self.ERROR_PATTERN: m = re.search(self.ERROR_PATTERN, self.data) if m is not None: try: errmsg = m.group(1).strip() except (AttributeError, IndexError): errmsg = m.group(0).strip() finally: errmsg = re.sub(r"<.*?>", " ", errmsg) self.info["error"] = errmsg self.log_warning(errmsg) if re.search(self.TEMP_OFFLINE_PATTERN, errmsg): self.temp_offline() elif re.search(self.OFFLINE_PATTERN, errmsg): self.offline() elif re.search(r"limit|wait|slot", errmsg, re.I): wait_time = parse.seconds(errmsg) self.wait( wait_time, reconnect=wait_time > self.config.get("max_wait", 10) * 60, ) self.restart(self._("Download limit exceeded")) elif re.search(r"country|ip|region|nation", errmsg, re.I): self.fail( self._("Connection from your current IP address is not allowed") ) elif re.search(r"captcha|code", errmsg, re.I): self.retry_captcha() elif re.search(r"countdown|expired", errmsg, re.I): self.retry(10, 60, self._("Link expired")) elif re.search(r"503|maint(e|ai)nance|temp|mirror", errmsg, re.I): self.temp_offline() elif re.search(r"up to|size", errmsg, re.I): self.fail(self._("File too large for free download")) elif re.search( r"404|sorry|offline|delet|remov|(no(t|thing)?|sn\'t) (found|(longer )?(available|exist))", errmsg, re.I, ): self.offline() elif re.search(r"filename", errmsg, re.I): 
self.fail(self._("Invalid url")) elif re.search(r"premium", errmsg, re.I): self.fail(self._("File can be downloaded by premium users only")) else: self.wait(60, reconnect=True) self.restart(errmsg) elif self.WAIT_PATTERN: m = re.search(self.WAIT_PATTERN, self.data) if m is not None: try: waitmsg = m.group(1).strip() except (AttributeError, IndexError): waitmsg = m.group(0).strip() wait_time = parse.seconds(waitmsg) self.wait( wait_time, reconnect=wait_time > self.config.get("max_wait", 10) * 60, ) self.log_info(self._("No errors found")) self.info.pop("error", None) #: Deprecated method (Remove in 0.6.x) def get_file_info(self): self.info.clear() self.grab_info() return self.info def handle_direct(self, pyfile): self.link = pyfile.url if self.isresource(pyfile.url) else None def handle_multi(self, pyfile): #: Multi-hoster handler pass def handle_free(self, pyfile): if not self.LINK_FREE_PATTERN: self.fail(self._("Free download not implemented")) m = re.search(self.LINK_FREE_PATTERN, self.data) if m is None: self.error(self._("Free download link not found")) else: self.link = m.group(1) def handle_premium(self, pyfile): if not self.LINK_PREMIUM_PATTERN: self.log_warning(self._("Premium download not implemented")) self.restart(premium=False) m = re.search(self.LINK_PREMIUM_PATTERN, self.data) if m is None: self.error(self._("Premium download link not found")) else: self.link = m.group(1)
35.160156
140
0.55205
import os import re from ...core.network.http.exceptions import BadHeader from ...core.network.request_factory import get_url from ...core.utils import parse from ...core.utils.old import parse_name from ..helpers import replace_patterns from .downloader import BaseDownloader class SimpleDownloader(BaseDownloader): __name__ = "SimpleDownloader" __type__ = "downloader" __version__ = "2.27" __status__ = "stable" __pyload_version__ = "0.5" __pattern__ = r"^unmatchable$" __config__ = [ ("enabled", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ("fallback", "bool", "Fallback to free download if premium fails", True), ("chk_filesize", "bool", "Check file size", True), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10), ] __description__ = """Simple downloader plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] NAME_REPLACEMENTS = [] SIZE_REPLACEMENTS = [] URL_REPLACEMENTS = [] CHECK_FILE = True CHECK_TRAFFIC = ( False ) COOKIES = True DIRECT_LINK = True DISPOSITION = True LOGIN_ACCOUNT = False LOGIN_PREMIUM = False LEECH_HOSTER = False TEXT_ENCODING = True LINK_PATTERN = None LINK_FREE_PATTERN = None LINK_PREMIUM_PATTERN = None INFO_PATTERN = None NAME_PATTERN = None SIZE_PATTERN = None HASHSUM_PATTERN = r"[^\w](?P<H>(CRC|crc)(-?32)?|(MD|md)-?5|(SHA|sha)-?(1|224|256|384|512)).*(:|=|>)[ ]*(?P<D>(?:[a-z0-9]|[A-Z0-9]){8,})" OFFLINE_PATTERN = r"[^\w](404\s|[Ii]nvalid|[Oo]ffline|[Dd]elet|[Rr]emov|([Nn]o(t|thing)?|sn\'t) (found|(longer )?(available|exist)))" TEMP_OFFLINE_PATTERN = ( r"[^\w](503\s|[Mm]aint(e|ai)nance|[Tt]emp([.-]|orarily)|[Mm]irror)" ) WAIT_PATTERN = None PREMIUM_ONLY_PATTERN = None HAPPY_HOUR_PATTERN = None IP_BLOCKED_PATTERN = None DL_LIMIT_PATTERN = None SIZE_LIMIT_PATTERN = None ERROR_PATTERN = None FILE_ERRORS = [ ( "Html error", r"\A(?:\s*<.+>)?((?:[\w\s]*(?:[Ee]rror|ERROR)\s*\:?)?\s*\d{3})(?:\Z|\s+)", ), ("Request error", r"([Aa]n error occured 
while processing your request)"), ("Html file", r"\A\s*<!DOCTYPE html"), ] @classmethod def api_info(cls, url): return {} @classmethod def get_info(cls, url="", html=""): info = super(SimpleDownloader, cls).get_info(url) info.update(cls.api_info(url)) if not html and info["status"] != 2: if not url: info["error"] = "missing url" info["status"] = 1 elif info["status"] in (3, 7): try: html = get_url(url, cookies=cls.COOKIES, decode=cls.TEXT_ENCODING) except BadHeader as exc: info["error"] = "{}: {}".format(exc.code, exc.content) except Exception: pass if html: if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN, html) is not None: info["status"] = 1 elif ( cls.TEMP_OFFLINE_PATTERN and re.search(cls.TEMP_OFFLINE_PATTERN, html) is not None ): info["status"] = 6 else: for pattern in ( "INFO_PATTERN", "NAME_PATTERN", "SIZE_PATTERN", "HASHSUM_PATTERN", ): try: attr = getattr(cls, pattern) pdict = re.search(attr, html).groupdict() if all(True for k in pdict if k not in info["pattern"]): info["pattern"].update(pdict) except Exception: continue else: info["status"] = 2 if "N" in info["pattern"]: name = replace_patterns(info["pattern"]["N"], cls.NAME_REPLACEMENTS) info["name"] = parse_name(name) if "S" in info["pattern"]: size = replace_patterns( info["pattern"]["S"] + info["pattern"]["U"] if "U" in info["pattern"] else info["pattern"]["S"], cls.SIZE_REPLACEMENTS, ) info["size"] = parse.bytesize(size) elif isinstance(info["size"], str): unit = info["units"] if "units" in info else "" info["size"] = parse.bytesize(info["size"], unit) if "H" in info["pattern"]: hash_type = info["pattern"]["H"].strip("-").upper() info["hash"][hash_type] = info["pattern"]["D"] return info def setup(self): self.multi_dl = self.premium self.resume_download = self.premium def _prepare(self): self.link = "" self.direct_dl = False self.leech_dl = False if self.LOGIN_PREMIUM: self.no_fallback = True if not self.premium: self.fail(self._("Required premium account not found")) if self.LOGIN_ACCOUNT 
and not self.account: self.fail(self._("Required account not found")) self.req.set_option("timeout", 120) if self.LINK_PATTERN: if self.LINK_FREE_PATTERN is None: self.LINK_FREE_PATTERN = self.LINK_PATTERN if self.LINK_PREMIUM_PATTERN is None: self.LINK_PREMIUM_PATTERN = self.LINK_PATTERN if self.LEECH_HOSTER: pattern = self.pyload.plugin_manager.hoster_plugins.get(self.classname)[ "pattern" ] if ( self.__pattern__ != pattern and re.match(self.__pattern__, self.pyfile.url) is None ): self.leech_dl = True if self.leech_dl: self.direct_dl = False elif self.DIRECT_LINK is None: self.direct_dl = bool(self.premium) else: self.direct_dl = self.DIRECT_LINK if not self.leech_dl: self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS) def _preload(self): if self.data: return self.data = self.load( self.pyfile.url, cookies=self.COOKIES, ref=False, decode=self.TEXT_ENCODING ) def process(self, pyfile): self._prepare() # TODO: Remove `handle_multi`, use MultiDownloader instead if self.leech_dl: self.log_info(self._("Processing as debrid download...")) self.handle_multi(pyfile) else: if not self.link and self.direct_dl: self.log_info(self._("Looking for direct download link...")) self.handle_direct(pyfile) if self.link: self.log_info(self._("Direct download link detected")) else: self.log_info(self._("Direct download link not found")) if not self.link: self._preload() self.check_errors() if self.info.get("status", 7) != 2: self.grab_info() self.check_status() self.check_duplicates() if self.premium and ( not self.CHECK_TRAFFIC or not self.out_of_traffic() ): self.log_info(self._("Processing as premium download...")) self.handle_premium(pyfile) elif not self.LOGIN_ACCOUNT or ( not self.CHECK_TRAFFIC or not self.out_of_traffic() ): self.log_info(self._("Processing as free download...")) self.handle_free(pyfile) if self.link and not self.last_download: self.log_info(self._("Downloading file...")) self.download(self.link, disposition=self.DISPOSITION) def 
_check_download(self): super()._check_download() self.check_download() def check_download(self): self.log_info(self._("Checking file (with built-in rules)...")) for r, p in self.FILE_ERRORS: errmsg = self.scan_download({r: re.compile(p)}) if errmsg is not None: errmsg = errmsg.strip().capitalize() try: errmsg += " | " + self.last_check.group(1).strip() except Exception: pass self.log_warning( self._("Check result: ") + errmsg, self._("Waiting 1 minute and retry"), ) self.wait(60, reconnect=True) self.restart(errmsg) else: if self.CHECK_FILE: self.log_info(self._("Checking file (with custom rules)...")) with open(os.fsdecode(self.last_download), mode="rb") as fp: self.data = fp.read(1_048_576) # TODO: Recheck in 0.6.x self.check_errors() self.log_info(self._("No errors found")) def check_errors(self): self.log_info(self._("Checking for link errors...")) if not self.data: self.log_warning(self._("No data to check")) return if self.IP_BLOCKED_PATTERN and re.search(self.IP_BLOCKED_PATTERN, self.data): self.fail(self._("Connection from your current IP address is not allowed")) elif not self.premium: if self.PREMIUM_ONLY_PATTERN and re.search( self.PREMIUM_ONLY_PATTERN, self.data ): self.fail(self._("File can be downloaded by premium users only")) elif self.SIZE_LIMIT_PATTERN and re.search( self.SIZE_LIMIT_PATTERN, self.data ): self.fail(self._("File too large for free download")) elif self.DL_LIMIT_PATTERN and re.search(self.DL_LIMIT_PATTERN, self.data): m = re.search(self.DL_LIMIT_PATTERN, self.data) try: errmsg = m.group(1) except (AttributeError, IndexError): errmsg = m.group(0) finally: errmsg = re.sub(r"<.*?>", " ", errmsg.strip()) self.info["error"] = errmsg self.log_warning(errmsg) wait_time = parse.seconds(errmsg) self.wait( wait_time, reconnect=wait_time > self.config.get("max_wait", 10) * 60, ) self.restart(self._("Download limit exceeded")) if self.HAPPY_HOUR_PATTERN and re.search(self.HAPPY_HOUR_PATTERN, self.data): self.multi_dl = True if 
self.ERROR_PATTERN: m = re.search(self.ERROR_PATTERN, self.data) if m is not None: try: errmsg = m.group(1).strip() except (AttributeError, IndexError): errmsg = m.group(0).strip() finally: errmsg = re.sub(r"<.*?>", " ", errmsg) self.info["error"] = errmsg self.log_warning(errmsg) if re.search(self.TEMP_OFFLINE_PATTERN, errmsg): self.temp_offline() elif re.search(self.OFFLINE_PATTERN, errmsg): self.offline() elif re.search(r"limit|wait|slot", errmsg, re.I): wait_time = parse.seconds(errmsg) self.wait( wait_time, reconnect=wait_time > self.config.get("max_wait", 10) * 60, ) self.restart(self._("Download limit exceeded")) elif re.search(r"country|ip|region|nation", errmsg, re.I): self.fail( self._("Connection from your current IP address is not allowed") ) elif re.search(r"captcha|code", errmsg, re.I): self.retry_captcha() elif re.search(r"countdown|expired", errmsg, re.I): self.retry(10, 60, self._("Link expired")) elif re.search(r"503|maint(e|ai)nance|temp|mirror", errmsg, re.I): self.temp_offline() elif re.search(r"up to|size", errmsg, re.I): self.fail(self._("File too large for free download")) elif re.search( r"404|sorry|offline|delet|remov|(no(t|thing)?|sn\'t) (found|(longer )?(available|exist))", errmsg, re.I, ): self.offline() elif re.search(r"filename", errmsg, re.I): self.fail(self._("Invalid url")) elif re.search(r"premium", errmsg, re.I): self.fail(self._("File can be downloaded by premium users only")) else: self.wait(60, reconnect=True) self.restart(errmsg) elif self.WAIT_PATTERN: m = re.search(self.WAIT_PATTERN, self.data) if m is not None: try: waitmsg = m.group(1).strip() except (AttributeError, IndexError): waitmsg = m.group(0).strip() wait_time = parse.seconds(waitmsg) self.wait( wait_time, reconnect=wait_time > self.config.get("max_wait", 10) * 60, ) self.log_info(self._("No errors found")) self.info.pop("error", None) def get_file_info(self): self.info.clear() self.grab_info() return self.info def handle_direct(self, pyfile): self.link = 
pyfile.url if self.isresource(pyfile.url) else None def handle_multi(self, pyfile): pass def handle_free(self, pyfile): if not self.LINK_FREE_PATTERN: self.fail(self._("Free download not implemented")) m = re.search(self.LINK_FREE_PATTERN, self.data) if m is None: self.error(self._("Free download link not found")) else: self.link = m.group(1) def handle_premium(self, pyfile): if not self.LINK_PREMIUM_PATTERN: self.log_warning(self._("Premium download not implemented")) self.restart(premium=False) m = re.search(self.LINK_PREMIUM_PATTERN, self.data) if m is None: self.error(self._("Premium download link not found")) else: self.link = m.group(1)
true
true
1c354aa3f18c72b34bc506ed91b7e96cfa3c03a4
41,250
py
Python
tests/conftest.py
deniscapeto/SimpleCorreiosTrackingService
e96bcec580dc6cd2cc89c0e8e038270d40d19164
[ "MIT" ]
null
null
null
tests/conftest.py
deniscapeto/SimpleCorreiosTrackingService
e96bcec580dc6cd2cc89c0e8e038270d40d19164
[ "MIT" ]
12
2020-06-05T23:26:54.000Z
2021-10-02T09:36:41.000Z
tests/conftest.py
deniscapeto/SimpleCorreiosTrackingService
e96bcec580dc6cd2cc89c0e8e038270d40d19164
[ "MIT" ]
1
2019-10-11T00:32:06.000Z
2019-10-11T00:32:06.000Z
import pytest from scts.tracking.domain.models import TrackingEvent @pytest.fixture def fake_html(): return """ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <html lang="pt-br"> <head> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <meta http-equiv="Content-Language" Content="pt"> <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <title>Resultado Rastreamento</title> <meta name="description" content="[page]" /> <meta name="keywords" content="" /> <!-- AppInternalsXpert BMX Integration Begin --> <script> if(!RVBD_EUE){ var RVBD_EUE={startJS:Number(new Date()), clientId:'',appId:1, collector:'apmperformance.correios.com.br', collectorHttpPort:80, collectorHttpsPort:443, sv:'0401', ajax:true, sync:true, ajaxResponseTime:true}; (function(){ var w=window,l=w.addEventListener,m=w.attachEvent, d=document,s='script',t='load',o=RVBD_EUE, r=(('https:'===d.location.protocol)? 
'https://apmperformance.correios.com.br:443': 'http://apmperformance.correios.com.br:80')+ '/jsi/riverbed_appinternals.d.'+ (o.ajax?'ajax.js':'js'),p=('onpagehide' in w),e=p?'pageshow':t, j=d.createElement(s),x=d.getElementsByTagName(s)[0], h=function(y){o.ldJS=o.ldJS||new Date();o.per=y?y.persisted:null;}, i=function(){o.ld=1;};o.cookie=d.cookie;d.cookie= '_op_aixPageId=0; path=/; expires='+(new Date(0)).toGMTString(); o.cookieAfterDelete=d.cookie;j.async=1;j.src=r; if(l){l(e,h,false);if(p){l(t,i,false);}}else if(m) {m('on'+e,h);if(p){m('on'+t,i);}} if(o.sync){d.write('<'+s+' src=\''+r+'\'></'+s+'>');} else{x.parentNode.insertBefore(j,x);} })();} </script> <!-- AppInternalsXpert BMX Integration End --> <script type="text/javascript"> pageid = '1C658B24-5056-9163-891FB9FC40735A16'; </script> <noscript> <p>identificador da página</p> </noscript> <base href="/"> <link rel="icon" href="home2014/img/icon.png" type="image/gif"/> <!-- CSS --> <link href="home2014/css/layout.css" rel="stylesheet" type="text/css" /> <!-- /CSS ---> </head> <!--[if lt IE 7]> <body class="ie6"> <![endif]--> <!--[if IE 7]> <body class="ie7"> <![endif]--> <!--[if IE 8]> <body class="ie8"> <![endif]--> <!--[if !IE]>--> <body> <!--<![endif]--> <div class="back"> <div class="acessibilidade"> <div class="wrap"> <ul> <li class="tocontent"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" onClick="document.location.hash='ancora';" title="Ir para o conte&uacute;do principal da p&aacute;gina">Ir ao conte&uacute;do</a> </li> <li class="font-plus" id="biggerFont"> <a href="javascript:void(0);" title="Aumentar o tamanho da fonte do texto">A</a> </li> <li id="defaultFont"> <span class="separator-dot">&nbsp;</span> <a href="javascript:void(0);" title="Retornar a fonte do texto para tamanho padr&atilde;o">Tamanho padr&atilde;o</a> <span class="separator-dot">&nbsp;</span> </li> <li class="font-minus" id="smallerFont"> <a href="javascript:void(0);" title="Diminuir o tamanho da fonte do 
texto">A</a> </li> <li class="contrast" id="contrast"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" title="Modificar para o modo de alto contraste">Contraste</a> <span class="separator-bar">&nbsp;</span> </li> <li> <span class="separator-bar">&nbsp;</span> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/">Teclado Virtual</a> </li> <li> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/" id="txt-headmouse">Headmouse</a></li> </ul> </div> </div> <!-- header ---> <div class="header"> <h1 class="logo float-left"> <a href="http://www.correios.com.br" title="Ir para a página incial" alt="Logo Correios"><img src="home2014/img/layout/logo.png" alt="Ir para a página inicial" title="Ir para a página incial"/></a> </h1> <div class="acesso_rapido"> <div class="text-right"> <a href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale com os Correios</a><br/> </div> <div class="produtosaz float-right"> <div class="expo">Outros sites</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Acesse a outros sites dos Correios</span><br/> <a href="http://www.correios.com.br/para-voce"><b>Correios para você</b></a><br/> <a href="http://www.correios.com.br/para-sua-empresa"><b>Correios para sua empresa</b></a><br/> <a href="http://www.correios.com.br/sobre-correios"><b>Sobre Correios</b></a><br/> <a href="produtosaz/default.cfm?filtro=R/Z"><b>Espaço da Filatelia</b></a><br/> <a href="http://blog.correios.com.br/correios"><b>Blog Institucional do Correios</b></a><br/> <a href="http://m.correios.com.br"><b>Correios mobile</b></a><br/> </div> </span> </div> <div class="produtosaz float-right"> <div class="expo" >Correios de A a Z</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Escolha pela letra inicial dos nossos produtos, serviços e assuntos.<br/> </span><br/> <a 
href="http://www.correios.com.br/a-a-z/"><b>Todos os itens</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=abc"><b>Correios de A-C</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=def"><b>Correios de D-F</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=ghijklmnopq"><b>Correios de G-Q</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=rstuvwxyz"><b>Correios de R-Z</b></a> </div> </span> </div> </div> <!-- acesso rápido --> <br class="clr"/> </div> <div class="tabs"> <div class="wrap"> <ul class="tabs-list"> <li class="current"> <a href="default.cfm"> <h2>Sistemas</h2> </a> </li> </ul> </div> <!-- /header ---> <div class="wrap"> <div class="content"> <div class="laminas" style="display: block;"> <div class="column1"> <span class="mver"> <span class="dominio"></span> <h3>Rastreamento</h3> <ul> <li><a href="sistemas/rastreamento/default.cfm" target="_self" >Rastreamento de objetos</a></li> <li><a href="http://globaltracktrace.ptc.post/gtt.web/Search.aspx" target="_self" >Rastreamento de objetos em outros países</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto" target="_self" >Como rastrear um objeto</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto/siglas-utilizadas-no-rastreamento-de-objeto" target="_self" >Siglas utilizadas no rastreamento de objetos</a></li> </ul> </span> <!-- Fim Menu vertical --> </div> <!-- column1 --> <div class="column2"> <div class="breadcrumb"></div> <div class="content "> <a name="ancora"></a> <div class="tituloimagem"> <h3><span class="codSro"><span>PU</span><span>524</span><span>124</span><span>388</span><span>BR</span></h3> </div> <div class="msg"> </div> <div class="quadroavisos"> SEDEX 12 e do SEDEX Hoje, representa o hor&aacute;rio real da entrega.</p><p>As informa&ccedil;&otilde;es de rastro de objetos registrados ficar&atilde;o dispon&iacute;veis at&eacute; 180 dias ap&oacute;s a data de 
postagem.</p><h4>Objetos com origem ou destino fora do Brasil</h4><p>O rastreamento para objetos postados no Brasil com c&oacute;digo iniciado por "R" e "C" e terminado com "BR" n&atilde;o &eacute; garantido fora do territ&oacute;rio brasileiro.</p><p>Para esses objetos, os operadores postais de outros pa&iacute;ses podem n&atilde;o disponibilizar e/ou transmitir informa&ccedil;&atilde;o de rastreamento para o Brasil.</p><p>Sendo assim, consultas de rastreamento de objetos podem tamb&eacute;m ser realizadas nos sites dos operadores de destino dispon&iacute;veis no site da UPU - Uni&atilde;o Postal Universal.</p><p><b>Para os objetos postados no Exterior para o Brasil</b>, o servi&ccedil;o contratado pelo remetente na origem determina o n&iacute;vel de informa&ccedil;&atilde;o de rastreamento de objetos em nosso site.</p> <p>Objetos registrados recebidos do exterior que apresentam código iniciado por "R" não pertencem &agrave; modalidade expressa, portanto não h&aacute; rastreamento ponto a ponto. As informa&ccedil;&otilde;es no sistema de rastreamento para esses objetos "R" incluem apenas os eventos: "recebimento no Brasil", "entrega", "tentativa de entrega" ou "aguardando retirada na unidade responsável". No caso do objeto ser tributado, haver&aacute; os eventos de "encaminhamento para fiscaliza&ccedil;&atilde;o e tributa&ccedil;&atilde;o e "sa&iacute;da da fiscaliza&ccedil;&atilde;o". </p> <p>O prazo estimado de entrega dos objetos registrados &eacute; de 40 DIAS &Uacute;TEIS a partir da confirma&ccedil;&atilde;o de pagamento dos impostos (se tributado) e do despacho postal. <a href="http://www.correios.com.br/encomendas-logistica/entrega/importacao/prazos-dos-servicos-internacionais-de-importacao" target="_blank">Tabela prazos de entrega</a></p> <p>Remessas iniciadas com o c&oacute;digo "UM" n&atilde;o s&atilde;o rastre&aacute;veis no Brasil. 
Esse c&oacute;digo &eacute; utilizado pelo pa&iacute;s de origem para indicar que a remessa &eacute; pass&iacute;vel de pagamento de imposto de importa&ccedil;&atilde;o no destino.</p> </div> <div class="ctrlcontent"> <!-- pode ser suspenso: PU524124388BR-true--> <div class="highlightSRO"> <!-- imagem: --> <div id="imagemhidden"></div> <div display:block><br /> <br /> <br /><br /> <script src="sistemas/rastreamento/js/qrcode.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.qrcode.js" type="text/javascript"></script> </div> </div> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">02/03/2021 <br/> 13:54 <br/> <label style="text-transform:capitalize;">SAO PAULO / SP</label> </td> <td class="sroLbEvent"> <strong>Objeto entregue ao destinatário</strong> <br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">02/03/2021 <br/> 08:59 <br/> <label style="text-transform:capitalize;">SAO PAULO / SP</label> </td> <td class="sroLbEvent"> <strong>Objeto saiu para entrega ao destinatário</strong> <br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">02/03/2021 <br/> 05:09 <br/> SAO PAULO / SP<br/> </td> <td class="sroLbEvent"> <strong>Objeto em trânsito - por favor aguarde</strong> <br/> de Unidade de Tratamento em SAO PAULO / SP para Unidade de Distribuição em SAO PAULO / SP<br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">01/03/2021 <br/> 17:57 <br/> SAO PAULO / SP<br/> </td> <td class="sroLbEvent"> <strong>Objeto em trânsito - por favor aguarde</strong> <br/> de Agência dos Correios em SAO PAULO / SP para Unidade de Tratamento em SAO PAULO / SP<br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">01/03/2021 <br/> 17:27 <br/> <label style="text-transform:capitalize;">SAO PAULO / SP</label> </td> <td class="sroLbEvent"> <strong>Objeto postado</strong> 
<br/> </td> </tr> </table> <br /> <div id="imagem" style="display:none"> <div class="status"> <img src="home2014/img/SRO/" alt="Objeto entregue ao destinatário"> </div> </div> <script> document.getElementById('imagemhidden').innerHTML = document.getElementById('imagem').innerHTML; </script> <div class="btnform"> <p>Todos os objetos internacionais estão sujeitos à cobrança do despacho postal. Clique <a href="https://www.correios.com.br/encomendas-logistica/minhas-importacoes">aqui</a> para saber mais</p> <button class="btn2 float-left" onClick="location.href='https://www2.correios.com.br/sistemas/rastreamento/'">Nova Consulta</button> </div> <br /><br /> <div class="act-extras" style="display:block; width:430px"> <form name="frmprint" id="frmprint" method="post" action="sistemas/rastreamento/newprint.cfm" target="_blank"> <input type="hidden" name="objetos" value="PU524124388BR" /> </form> <a class="icon print sro" onClick="document.getElementById('frmprint').submit();"><img width="22px" height="22px" src="home2014/img/trans.gif">Imprimir</a> <!-- pode ser suspenso--> <!-- Não está logado, mas passivel de suspensão --> <a class="icon stop sro" onClick="document.getElementById('formTelaSusp').submit();"><img class="fechar" width="22px" height="22px" src="home2014/img/trans.gif">Suspender Entrega</a> <form name="formTelaSusp" id="formTelaSusp" method="post" action="sistemas/rastreamento/suspensaoEntrega/dsp/default.cfm"> <input type="hidden" name="ObjAsuspender" id="ObjAsuspender" value="PU524124388BR" /> </form> </div> <br /> <div class="destaque" style="background-color:##eee; width:98%; display:table; height:auto; padding:10px; margin-bottom:5px;"> <div id="qrcodeTable" style="float:left; width:100px"> <img src="https://chart.apis.google.com/chart?cht=qr&chl=PU524124388BR&chs=116x116" alt="PU524124388BR - QR code"/> </div> <div style="float:left; width:200px; padding:10px 13px; font-size:1.3em;"> Acesse o aplicativo dos Correios e leia o c&oacute;digo 2D ao lado. 
Voc&ecirc; n&atilde;o precisar&aacute; digitar o c&oacute;digo do objeto e poder&aacute; salvar na sua lista de favoritos. </div> <div style="float:left; width:100px; margin:3px;"> <a href="https://itunes.apple.com/us/app/sro-mobile/id998782060?l=pt&ls=1&mt=8" target="_blank"> <img src="home2014/img/sro/Apple-store.png" width="138" height="43" style="border:2px solid #eee"> </a> <a href="https://play.google.com/store/apps/details?id=br.com.correios.srocorreios" target="_blank"> <img src="home2014/img/sro/Google-store.png" width="138" height="43" style="border:2px solid #eee"> </a> </div> </div> <br /> <a href="https://www.correios.com.br/banner-sro/link-banner-sro" target="_blank"><img src="https://www.correios.com.br/banner-sro/banner_sro/" width="480px" height="94px" /></a> </div> <div class="modal"> <div id="inline_content2" class="inline_content"> <h3><br /> <br /> O status do objeto pesquisado est&aacute; finalizado.<br />N&atilde;o &eacute; poss&iacute;vel solicitar notifica&ccedil;&atilde;o por SMS.</h3> </div> <script src="sistemas/rastreamento/js/jquery.validationEngine.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.validationEngine-pt.js" type="text/javascript"></script> <script> //$(document).ready(function(){$("#formSms").validationEngine();}); </script> </div> <script type="text/javascript" src="sistemas/rastreamento/js/sro.js"></script> <script type="text/javascript" src="sistemas/rastreamento/js/jquery.maskedinput.min.js"></script> <script type="text/javascript" src="sistemas/rastreamento/js/MascaraValidacao.js"></script> <!-- TipoBD(01) --> </div> </div> <br class="clr" /> </div> </div> </div> </div><!-- class="wrap" --> <!-- laminas --> <div class="footer"> <div class="wrap"> <div class="column-footer"> <h3><a target="_blank" href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale Conosco</a></h3> <ul> <li class="node"><a href="sistemas/falecomoscorreios">Manifestação via Internet</a> <ul> <li><a 
href="sistemas/falecomoscorreios">Fale Conosco pelo site</a></li> </ul> </li> <li class="node"> <a href="sistemas/falecomoscorreios">Atendimento telef&ocirc;nico</a> <ul> <li>3003 0100 (Capitais e Regi&atilde;o Metropolitanas)</li> <li>0800 725 7282 (Demais localidades)</li> <li>0800 725 0100 (Sugest&otilde;es ou reclama&ccedil;&otilde;es)</li> <li>0800 725 0898 (exclusivo para portadores <br/> de deficiência auditiva) </li> <li>3003 1383 (Informações Banco Postal)</li> </ul> </li> <li class="node"> <a href="sistemas/agencias/">Rede de atendimento</a> <ul> <li><a href="sistemas/agencias">Consulte endereços e horários de atendimentos <br /> das agências dos Correios</a></li> </ul> </li> <li class="node"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/ouvidoria">Ouvidoria</a> </li> </ul> </div> <div class="column-footer"> <h3><a href="">Portal Correios</a></h3> <ul> <li><a href="http://www.correios.com.br/sitemap">Mapa do site</a></li> <li><a href="sistemas/rastreamento/default.cfm">Rastreamento de objetos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/sala-de-imprensa/fale-com-a-assessoria-de-comunicacao">Sala de Imprensa</a></li> <li><a href="institucional/concursos/correios/default.cfm">Concursos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/patrocinio/patrocinio">Patroc&iacute;nios</a></li> <li><a href="http://www.correios.com.br/para-voce/correios-de-a-a-z/contatos-comerciais">Contatos comerciais</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/a-empresa/carta-de-servicos-ao-cidadao">Carta de servi&ccedil;os ao cidad&atilde;o</a></li> <li><a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/denuncias/denuncias/">Denúncia</a></li> <li><a href="http://www.mc.gov.br/">Minist&eacute;rio das Comunica&ccedil;&otilde;es</a></li> </ul> </div> <div class="column-footer"> <h3><a href="">Outros sites dos Correios</a></h3> <ul> <li><a 
href="http://www.correios.com.br/para-voce">Correios para voc&ecirc;</a></li> <li><a href="http://www.correios.com.br/para-sua-empresa">Correios para sua empresa</a></li> <li><a href="http://www.correios.com.br/sobre-correios">Sobre Correios</a></li> <li><a href="http://shopping.correios.com.br/correiosonline">Loja virtual dos Correios</a></li> <li><a href="http://blog.correios.com.br/correios">Blog dos Correios</a></li> <li><a href="http://blog.correios.com.br/filatelia">Espa&ccedil;o da Filatelia</a></li> <li><a href="http://m.correios.com.br/">Correios Mobile</a></li> <li><a href="http://www2.correios.com.br/">Sistemas dos Correios</a></li> </ul> </div> <div class="copy"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/politica-de-privacidade-e-notas-legais/">Pol&iacute;tica de Privacidade e notas legais</a> - © Copyright 2018 Correios - Todos os direitos reservados. </div> </div> </div> </div> <!-- class="back" --> </body></html> """ @pytest.fixture def fake_invalid_html(): return """ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <html lang="pt-br"> <head> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <meta http-equiv="Content-Language" Content="pt"> <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <title>Resultado Rastreamento</title> <meta name="description" content="[page]" /> <meta name="keywords" content="" /> <!-- AppInternalsXpert BMX Integration Begin --> <script> if(!RVBD_EUE){ var RVBD_EUE={startJS:Number(new Date()), clientId:'',appId:1, collector:'apmperformance.correios.com.br', collectorHttpPort:80, collectorHttpsPort:443, sv:'0401', ajax:true, sync:true, ajaxResponseTime:true}; (function(){ var w=window,l=w.addEventListener,m=w.attachEvent, d=document,s='script',t='load',o=RVBD_EUE, r=(('https:'===d.location.protocol)? 
'https://apmperformance.correios.com.br:443': 'http://apmperformance.correios.com.br:80')+ '/jsi/riverbed_appinternals.d.'+ (o.ajax?'ajax.js':'js'),p=('onpagehide' in w),e=p?'pageshow':t, j=d.createElement(s),x=d.getElementsByTagName(s)[0], h=function(y){o.ldJS=o.ldJS||new Date();o.per=y?y.persisted:null;}, i=function(){o.ld=1;};o.cookie=d.cookie;d.cookie= '_op_aixPageId=0; path=/; expires='+(new Date(0)).toGMTString(); o.cookieAfterDelete=d.cookie;j.async=1;j.src=r; if(l){l(e,h,false);if(p){l(t,i,false);}}else if(m) {m('on'+e,h);if(p){m('on'+t,i);}} if(o.sync){d.write('<'+s+' src=\''+r+'\'></'+s+'>');} else{x.parentNode.insertBefore(j,x);} })();} </script> <!-- AppInternalsXpert BMX Integration End --> <script type="text/javascript"> pageid = '1C658B24-5056-9163-891FB9FC40735A16'; </script> <noscript> <p>identificador da página</p> </noscript> <base href="/"> <link rel="icon" href="home2014/img/icon.png" type="image/gif"/> <!-- CSS --> <link href="home2014/css/layout.css" rel="stylesheet" type="text/css" /> <!-- /CSS ---> </head> <!--[if lt IE 7]> <body class="ie6"> <![endif]--> <!--[if IE 7]> <body class="ie7"> <![endif]--> <!--[if IE 8]> <body class="ie8"> <![endif]--> <!--[if !IE]>--> <body> <!--<![endif]--> <div class="back"> <div class="acessibilidade"> <div class="wrap"> <ul> <li class="tocontent"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" onClick="document.location.hash='ancora';" title="Ir para o conte&uacute;do principal da p&aacute;gina">Ir ao conte&uacute;do</a> </li> <li class="font-plus" id="biggerFont"> <a href="javascript:void(0);" title="Aumentar o tamanho da fonte do texto">A</a> </li> <li id="defaultFont"> <span class="separator-dot">&nbsp;</span> <a href="javascript:void(0);" title="Retornar a fonte do texto para tamanho padr&atilde;o">Tamanho padr&atilde;o</a> <span class="separator-dot">&nbsp;</span> </li> <li class="font-minus" id="smallerFont"> <a href="javascript:void(0);" title="Diminuir o tamanho da fonte do 
texto">A</a> </li> <li class="contrast" id="contrast"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" title="Modificar para o modo de alto contraste">Contraste</a> <span class="separator-bar">&nbsp;</span> </li> <li> <span class="separator-bar">&nbsp;</span> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/">Teclado Virtual</a> </li> <li> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/" id="txt-headmouse">Headmouse</a></li> </ul> </div> </div> <!-- header ---> <div class="header"> <h1 class="logo float-left"> <a href="http://www.correios.com.br" title="Ir para a página incial" alt="Logo Correios"><img src="home2014/img/layout/logo.png" alt="Ir para a página inicial" title="Ir para a página incial"/></a> </h1> <div class="acesso_rapido"> <div class="text-right"> <a href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale com os Correios</a><br/> </div> <div class="produtosaz float-right"> <div class="expo">Outros sites</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Acesse a outros sites dos Correios</span><br/> <a href="http://www.correios.com.br/para-voce"><b>Correios para você</b></a><br/> <a href="http://www.correios.com.br/para-sua-empresa"><b>Correios para sua empresa</b></a><br/> <a href="http://www.correios.com.br/sobre-correios"><b>Sobre Correios</b></a><br/> <a href="produtosaz/default.cfm?filtro=R/Z"><b>Espaço da Filatelia</b></a><br/> <a href="http://blog.correios.com.br/correios"><b>Blog Institucional do Correios</b></a><br/> <a href="http://m.correios.com.br"><b>Correios mobile</b></a><br/> </div> </span> </div> <div class="produtosaz float-right"> <div class="expo" >Correios de A a Z</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Escolha pela letra inicial dos nossos produtos, serviços e assuntos.<br/> </span><br/> <a 
href="http://www.correios.com.br/a-a-z/"><b>Todos os itens</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=abc"><b>Correios de A-C</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=def"><b>Correios de D-F</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=ghijklmnopq"><b>Correios de G-Q</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=rstuvwxyz"><b>Correios de R-Z</b></a> </div> </span> </div> </div> <!-- acesso rápido --> <br class="clr"/> </div> <div class="tabs"> <div class="wrap"> <ul class="tabs-list"> <li class="current"> <a href="default.cfm"> <h2>Sistemas</h2> </a> </li> </ul> </div> <!-- /header ---> <div class="wrap"> <div class="content"> <div class="laminas" style="display: block;"> <div class="column1"> <span class="mver"> <span class="dominio"></span> <h3>Rastreamento</h3> <ul> <li><a href="sistemas/rastreamento/default.cfm" target="_self" >Rastreamento de objetos</a></li> <li><a href="http://globaltracktrace.ptc.post/gtt.web/Search.aspx" target="_self" >Rastreamento de objetos em outros países</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto" target="_self" >Como rastrear um objeto</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto/siglas-utilizadas-no-rastreamento-de-objeto" target="_self" >Siglas utilizadas no rastreamento de objetos</a></li> </ul> </span> <!-- Fim Menu vertical --> </div> <!-- column1 --> <div class="column2"> <div class="breadcrumb"></div> <div class="content "> <a name="ancora"></a> <div class="tituloimagem"> <h3><span class="codSro"><span>PU</span><span>524</span><span>124</span><span>388</span><span>BR</span></h3> </div> <div class="msg"> </div> <div class="quadroavisos"> SEDEX 12 e do SEDEX Hoje, representa o hor&aacute;rio real da entrega.</p><p>As informa&ccedil;&otilde;es de rastro de objetos registrados ficar&atilde;o dispon&iacute;veis at&eacute; 180 dias ap&oacute;s a data de 
postagem.</p><h4>Objetos com origem ou destino fora do Brasil</h4><p>O rastreamento para objetos postados no Brasil com c&oacute;digo iniciado por "R" e "C" e terminado com "BR" n&atilde;o &eacute; garantido fora do territ&oacute;rio brasileiro.</p><p>Para esses objetos, os operadores postais de outros pa&iacute;ses podem n&atilde;o disponibilizar e/ou transmitir informa&ccedil;&atilde;o de rastreamento para o Brasil.</p><p>Sendo assim, consultas de rastreamento de objetos podem tamb&eacute;m ser realizadas nos sites dos operadores de destino dispon&iacute;veis no site da UPU - Uni&atilde;o Postal Universal.</p><p><b>Para os objetos postados no Exterior para o Brasil</b>, o servi&ccedil;o contratado pelo remetente na origem determina o n&iacute;vel de informa&ccedil;&atilde;o de rastreamento de objetos em nosso site.</p> <p>Objetos registrados recebidos do exterior que apresentam código iniciado por "R" não pertencem &agrave; modalidade expressa, portanto não h&aacute; rastreamento ponto a ponto. As informa&ccedil;&otilde;es no sistema de rastreamento para esses objetos "R" incluem apenas os eventos: "recebimento no Brasil", "entrega", "tentativa de entrega" ou "aguardando retirada na unidade responsável". No caso do objeto ser tributado, haver&aacute; os eventos de "encaminhamento para fiscaliza&ccedil;&atilde;o e tributa&ccedil;&atilde;o e "sa&iacute;da da fiscaliza&ccedil;&atilde;o". </p> <p>O prazo estimado de entrega dos objetos registrados &eacute; de 40 DIAS &Uacute;TEIS a partir da confirma&ccedil;&atilde;o de pagamento dos impostos (se tributado) e do despacho postal. <a href="http://www.correios.com.br/encomendas-logistica/entrega/importacao/prazos-dos-servicos-internacionais-de-importacao" target="_blank">Tabela prazos de entrega</a></p> <p>Remessas iniciadas com o c&oacute;digo "UM" n&atilde;o s&atilde;o rastre&aacute;veis no Brasil. 
Esse c&oacute;digo &eacute; utilizado pelo pa&iacute;s de origem para indicar que a remessa &eacute; pass&iacute;vel de pagamento de imposto de importa&ccedil;&atilde;o no destino.</p> </div> <div class="ctrlcontent"> <!-- pode ser suspenso: PU524124388BR-true--> <div class="highlightSRO"> <!-- imagem: --> <div id="imagemhidden"></div> <div display:block><br /> <br /> <br /><br /> <script src="sistemas/rastreamento/js/qrcode.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.qrcode.js" type="text/javascript"></script> </div> </div> <br /> <script> document.getElementById('imagemhidden').innerHTML = document.getElementById('imagem').innerHTML; </script> <div class="btnform"> <p>Todos os objetos internacionais estão sujeitos à cobrança do despacho postal. Clique <a href="https://www.correios.com.br/encomendas-logistica/minhas-importacoes">aqui</a> para saber mais</p> <button class="btn2 float-left" onClick="location.href='https://www2.correios.com.br/sistemas/rastreamento/'">Nova Consulta</button> </div> <br /><br /> <div class="act-extras" style="display:block; width:430px"> <form name="frmprint" id="frmprint" method="post" action="sistemas/rastreamento/newprint.cfm" target="_blank"> <input type="hidden" name="objetos" value="PU524124388BR" /> </form> <a class="icon print sro" onClick="document.getElementById('frmprint').submit();"><img width="22px" height="22px" src="home2014/img/trans.gif">Imprimir</a> <!-- pode ser suspenso--> <!-- Não está logado, mas passivel de suspensão --> <a class="icon stop sro" onClick="document.getElementById('formTelaSusp').submit();"><img class="fechar" width="22px" height="22px" src="home2014/img/trans.gif">Suspender Entrega</a> <form name="formTelaSusp" id="formTelaSusp" method="post" action="sistemas/rastreamento/suspensaoEntrega/dsp/default.cfm"> <input type="hidden" name="ObjAsuspender" id="ObjAsuspender" value="PU524124388BR" /> </form> </div> <br /> <div class="destaque" 
style="background-color:##eee; width:98%; display:table; height:auto; padding:10px; margin-bottom:5px;"> <div id="qrcodeTable" style="float:left; width:100px"> <img src="https://chart.apis.google.com/chart?cht=qr&chl=PU524124388BR&chs=116x116" alt="PU524124388BR - QR code"/> </div> <div style="float:left; width:200px; padding:10px 13px; font-size:1.3em;"> Acesse o aplicativo dos Correios e leia o c&oacute;digo 2D ao lado. Voc&ecirc; n&atilde;o precisar&aacute; digitar o c&oacute;digo do objeto e poder&aacute; salvar na sua lista de favoritos. </div> <div style="float:left; width:100px; margin:3px;"> <a href="https://itunes.apple.com/us/app/sro-mobile/id998782060?l=pt&ls=1&mt=8" target="_blank"> <img src="home2014/img/sro/Apple-store.png" width="138" height="43" style="border:2px solid #eee"> </a> <a href="https://play.google.com/store/apps/details?id=br.com.correios.srocorreios" target="_blank"> <img src="home2014/img/sro/Google-store.png" width="138" height="43" style="border:2px solid #eee"> </a> </div> </div> <br /> <a href="https://www.correios.com.br/banner-sro/link-banner-sro" target="_blank"><img src="https://www.correios.com.br/banner-sro/banner_sro/" width="480px" height="94px" /></a> </div> <div class="modal"> <div id="inline_content2" class="inline_content"> <h3><br /> <br /> O status do objeto pesquisado est&aacute; finalizado.<br />N&atilde;o &eacute; poss&iacute;vel solicitar notifica&ccedil;&atilde;o por SMS.</h3> </div> <script src="sistemas/rastreamento/js/jquery.validationEngine.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.validationEngine-pt.js" type="text/javascript"></script> <script> //$(document).ready(function(){$("#formSms").validationEngine();}); </script> </div> <script type="text/javascript" src="sistemas/rastreamento/js/sro.js"></script> <script type="text/javascript" src="sistemas/rastreamento/js/jquery.maskedinput.min.js"></script> <script type="text/javascript" 
src="sistemas/rastreamento/js/MascaraValidacao.js"></script> <!-- TipoBD(01) --> </div> </div> <br class="clr" /> </div> </div> </div> </div><!-- class="wrap" --> <!-- laminas --> <div class="footer"> <div class="wrap"> <div class="column-footer"> <h3><a target="_blank" href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale Conosco</a></h3> <ul> <li class="node"><a href="sistemas/falecomoscorreios">Manifestação via Internet</a> <ul> <li><a href="sistemas/falecomoscorreios">Fale Conosco pelo site</a></li> </ul> </li> <li class="node"> <a href="sistemas/falecomoscorreios">Atendimento telef&ocirc;nico</a> <ul> <li>3003 0100 (Capitais e Regi&atilde;o Metropolitanas)</li> <li>0800 725 7282 (Demais localidades)</li> <li>0800 725 0100 (Sugest&otilde;es ou reclama&ccedil;&otilde;es)</li> <li>0800 725 0898 (exclusivo para portadores <br/> de deficiência auditiva) </li> <li>3003 1383 (Informações Banco Postal)</li> </ul> </li> <li class="node"> <a href="sistemas/agencias/">Rede de atendimento</a> <ul> <li><a href="sistemas/agencias">Consulte endereços e horários de atendimentos <br /> das agências dos Correios</a></li> </ul> </li> <li class="node"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/ouvidoria">Ouvidoria</a> </li> </ul> </div> <div class="column-footer"> <h3><a href="">Portal Correios</a></h3> <ul> <li><a href="http://www.correios.com.br/sitemap">Mapa do site</a></li> <li><a href="sistemas/rastreamento/default.cfm">Rastreamento de objetos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/sala-de-imprensa/fale-com-a-assessoria-de-comunicacao">Sala de Imprensa</a></li> <li><a href="institucional/concursos/correios/default.cfm">Concursos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/patrocinio/patrocinio">Patroc&iacute;nios</a></li> <li><a href="http://www.correios.com.br/para-voce/correios-de-a-a-z/contatos-comerciais">Contatos comerciais</a></li> <li><a 
href="http://www.correios.com.br/correios/sobre-correios/a-empresa/carta-de-servicos-ao-cidadao">Carta de servi&ccedil;os ao cidad&atilde;o</a></li> <li><a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/denuncias/denuncias/">Denúncia</a></li> <li><a href="http://www.mc.gov.br/">Minist&eacute;rio das Comunica&ccedil;&otilde;es</a></li> </ul> </div> <div class="column-footer"> <h3><a href="">Outros sites dos Correios</a></h3> <ul> <li><a href="http://www.correios.com.br/para-voce">Correios para voc&ecirc;</a></li> <li><a href="http://www.correios.com.br/para-sua-empresa">Correios para sua empresa</a></li> <li><a href="http://www.correios.com.br/sobre-correios">Sobre Correios</a></li> <li><a href="http://shopping.correios.com.br/correiosonline">Loja virtual dos Correios</a></li> <li><a href="http://blog.correios.com.br/correios">Blog dos Correios</a></li> <li><a href="http://blog.correios.com.br/filatelia">Espa&ccedil;o da Filatelia</a></li> <li><a href="http://m.correios.com.br/">Correios Mobile</a></li> <li><a href="http://www2.correios.com.br/">Sistemas dos Correios</a></li> </ul> </div> <div class="copy"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/politica-de-privacidade-e-notas-legais/">Pol&iacute;tica de Privacidade e notas legais</a> - © Copyright 2018 Correios - Todos os direitos reservados. 
</div> </div> </div> </div> </body></html> """ @pytest.fixture def tracking_events_dict( tracking_code ): return [ {'code': tracking_code, 'date': '02/03/2021 13:54', 'location': 'SAO PAULO / SP', 'description': 'Objeto entregue ao destinatário'}, {'code': tracking_code, 'date': '02/03/2021 08:59', 'location': 'SAO PAULO / SP', 'description': 'Objeto saiu para entrega ao destinatário'}, {'code': tracking_code, 'date': '02/03/2021 05:09', 'location': 'SAO PAULO / SP', 'description': 'Objeto em trânsito - por favor aguarde de Unidade de Tratamento em SAO PAULO / SP para Unidade de Distribuição em SAO PAULO / SP'}, {'code': tracking_code, 'date': '01/03/2021 17:57', 'location': 'SAO PAULO / SP', 'description': 'Objeto em trânsito - por favor aguarde de Agência dos Correios em SAO PAULO / SP para Unidade de Tratamento em SAO PAULO / SP'}, {'code': tracking_code, 'date': '01/03/2021 17:27', 'location': 'SAO PAULO / SP', 'description': 'Objeto postado'} ] @pytest.fixture def tracking_events_list( tracking_code ): return [ TrackingEvent(tracking_code, '02/03/2021 13:54', 'SAO PAULO / SP', 'Objeto entregue ao destinatário'), TrackingEvent(tracking_code, '02/03/2021 08:59', 'SAO PAULO / SP', 'Objeto saiu para entrega ao destinatário'), TrackingEvent(tracking_code, '02/03/2021 05:09', 'SAO PAULO / SP', 'Objeto em trânsito - por favor aguarde de Unidade de Tratamento em SAO PAULO / SP para Unidade de Distribuição em SAO PAULO / SP'), TrackingEvent(tracking_code, '01/03/2021 17:57', 'SAO PAULO / SP', 'Objeto em trânsito - por favor aguarde de Agência dos Correios em SAO PAULO / SP para Unidade de Tratamento em SAO PAULO / SP'), TrackingEvent(tracking_code, '01/03/2021 17:27', 'SAO PAULO / SP', 'Objeto postado') ] @pytest.fixture def tracking_code(): return 'ON769530126BR'
52.347716
1,114
0.613939
import pytest from scts.tracking.domain.models import TrackingEvent @pytest.fixture def fake_html(): return """ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <html lang="pt-br"> <head> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <meta http-equiv="Content-Language" Content="pt"> <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <title>Resultado Rastreamento</title> <meta name="description" content="[page]" /> <meta name="keywords" content="" /> <!-- AppInternalsXpert BMX Integration Begin --> <script> if(!RVBD_EUE){ var RVBD_EUE={startJS:Number(new Date()), clientId:'',appId:1, collector:'apmperformance.correios.com.br', collectorHttpPort:80, collectorHttpsPort:443, sv:'0401', ajax:true, sync:true, ajaxResponseTime:true}; (function(){ var w=window,l=w.addEventListener,m=w.attachEvent, d=document,s='script',t='load',o=RVBD_EUE, r=(('https:'===d.location.protocol)? 
'https://apmperformance.correios.com.br:443': 'http://apmperformance.correios.com.br:80')+ '/jsi/riverbed_appinternals.d.'+ (o.ajax?'ajax.js':'js'),p=('onpagehide' in w),e=p?'pageshow':t, j=d.createElement(s),x=d.getElementsByTagName(s)[0], h=function(y){o.ldJS=o.ldJS||new Date();o.per=y?y.persisted:null;}, i=function(){o.ld=1;};o.cookie=d.cookie;d.cookie= '_op_aixPageId=0; path=/; expires='+(new Date(0)).toGMTString(); o.cookieAfterDelete=d.cookie;j.async=1;j.src=r; if(l){l(e,h,false);if(p){l(t,i,false);}}else if(m) {m('on'+e,h);if(p){m('on'+t,i);}} if(o.sync){d.write('<'+s+' src=\''+r+'\'></'+s+'>');} else{x.parentNode.insertBefore(j,x);} })();} </script> <!-- AppInternalsXpert BMX Integration End --> <script type="text/javascript"> pageid = '1C658B24-5056-9163-891FB9FC40735A16'; </script> <noscript> <p>identificador da página</p> </noscript> <base href="/"> <link rel="icon" href="home2014/img/icon.png" type="image/gif"/> <!-- CSS --> <link href="home2014/css/layout.css" rel="stylesheet" type="text/css" /> <!-- /CSS ---> </head> <!--[if lt IE 7]> <body class="ie6"> <![endif]--> <!--[if IE 7]> <body class="ie7"> <![endif]--> <!--[if IE 8]> <body class="ie8"> <![endif]--> <!--[if !IE]>--> <body> <!--<![endif]--> <div class="back"> <div class="acessibilidade"> <div class="wrap"> <ul> <li class="tocontent"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" onClick="document.location.hash='ancora';" title="Ir para o conte&uacute;do principal da p&aacute;gina">Ir ao conte&uacute;do</a> </li> <li class="font-plus" id="biggerFont"> <a href="javascript:void(0);" title="Aumentar o tamanho da fonte do texto">A</a> </li> <li id="defaultFont"> <span class="separator-dot">&nbsp;</span> <a href="javascript:void(0);" title="Retornar a fonte do texto para tamanho padr&atilde;o">Tamanho padr&atilde;o</a> <span class="separator-dot">&nbsp;</span> </li> <li class="font-minus" id="smallerFont"> <a href="javascript:void(0);" title="Diminuir o tamanho da fonte do 
texto">A</a> </li> <li class="contrast" id="contrast"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" title="Modificar para o modo de alto contraste">Contraste</a> <span class="separator-bar">&nbsp;</span> </li> <li> <span class="separator-bar">&nbsp;</span> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/">Teclado Virtual</a> </li> <li> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/" id="txt-headmouse">Headmouse</a></li> </ul> </div> </div> <!-- header ---> <div class="header"> <h1 class="logo float-left"> <a href="http://www.correios.com.br" title="Ir para a página incial" alt="Logo Correios"><img src="home2014/img/layout/logo.png" alt="Ir para a página inicial" title="Ir para a página incial"/></a> </h1> <div class="acesso_rapido"> <div class="text-right"> <a href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale com os Correios</a><br/> </div> <div class="produtosaz float-right"> <div class="expo">Outros sites</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Acesse a outros sites dos Correios</span><br/> <a href="http://www.correios.com.br/para-voce"><b>Correios para você</b></a><br/> <a href="http://www.correios.com.br/para-sua-empresa"><b>Correios para sua empresa</b></a><br/> <a href="http://www.correios.com.br/sobre-correios"><b>Sobre Correios</b></a><br/> <a href="produtosaz/default.cfm?filtro=R/Z"><b>Espaço da Filatelia</b></a><br/> <a href="http://blog.correios.com.br/correios"><b>Blog Institucional do Correios</b></a><br/> <a href="http://m.correios.com.br"><b>Correios mobile</b></a><br/> </div> </span> </div> <div class="produtosaz float-right"> <div class="expo" >Correios de A a Z</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Escolha pela letra inicial dos nossos produtos, serviços e assuntos.<br/> </span><br/> <a 
href="http://www.correios.com.br/a-a-z/"><b>Todos os itens</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=abc"><b>Correios de A-C</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=def"><b>Correios de D-F</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=ghijklmnopq"><b>Correios de G-Q</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=rstuvwxyz"><b>Correios de R-Z</b></a> </div> </span> </div> </div> <!-- acesso rápido --> <br class="clr"/> </div> <div class="tabs"> <div class="wrap"> <ul class="tabs-list"> <li class="current"> <a href="default.cfm"> <h2>Sistemas</h2> </a> </li> </ul> </div> <!-- /header ---> <div class="wrap"> <div class="content"> <div class="laminas" style="display: block;"> <div class="column1"> <span class="mver"> <span class="dominio"></span> <h3>Rastreamento</h3> <ul> <li><a href="sistemas/rastreamento/default.cfm" target="_self" >Rastreamento de objetos</a></li> <li><a href="http://globaltracktrace.ptc.post/gtt.web/Search.aspx" target="_self" >Rastreamento de objetos em outros países</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto" target="_self" >Como rastrear um objeto</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto/siglas-utilizadas-no-rastreamento-de-objeto" target="_self" >Siglas utilizadas no rastreamento de objetos</a></li> </ul> </span> <!-- Fim Menu vertical --> </div> <!-- column1 --> <div class="column2"> <div class="breadcrumb"></div> <div class="content "> <a name="ancora"></a> <div class="tituloimagem"> <h3><span class="codSro"><span>PU</span><span>524</span><span>124</span><span>388</span><span>BR</span></h3> </div> <div class="msg"> </div> <div class="quadroavisos"> SEDEX 12 e do SEDEX Hoje, representa o hor&aacute;rio real da entrega.</p><p>As informa&ccedil;&otilde;es de rastro de objetos registrados ficar&atilde;o dispon&iacute;veis at&eacute; 180 dias ap&oacute;s a data de 
postagem.</p><h4>Objetos com origem ou destino fora do Brasil</h4><p>O rastreamento para objetos postados no Brasil com c&oacute;digo iniciado por "R" e "C" e terminado com "BR" n&atilde;o &eacute; garantido fora do territ&oacute;rio brasileiro.</p><p>Para esses objetos, os operadores postais de outros pa&iacute;ses podem n&atilde;o disponibilizar e/ou transmitir informa&ccedil;&atilde;o de rastreamento para o Brasil.</p><p>Sendo assim, consultas de rastreamento de objetos podem tamb&eacute;m ser realizadas nos sites dos operadores de destino dispon&iacute;veis no site da UPU - Uni&atilde;o Postal Universal.</p><p><b>Para os objetos postados no Exterior para o Brasil</b>, o servi&ccedil;o contratado pelo remetente na origem determina o n&iacute;vel de informa&ccedil;&atilde;o de rastreamento de objetos em nosso site.</p> <p>Objetos registrados recebidos do exterior que apresentam código iniciado por "R" não pertencem &agrave; modalidade expressa, portanto não h&aacute; rastreamento ponto a ponto. As informa&ccedil;&otilde;es no sistema de rastreamento para esses objetos "R" incluem apenas os eventos: "recebimento no Brasil", "entrega", "tentativa de entrega" ou "aguardando retirada na unidade responsável". No caso do objeto ser tributado, haver&aacute; os eventos de "encaminhamento para fiscaliza&ccedil;&atilde;o e tributa&ccedil;&atilde;o e "sa&iacute;da da fiscaliza&ccedil;&atilde;o". </p> <p>O prazo estimado de entrega dos objetos registrados &eacute; de 40 DIAS &Uacute;TEIS a partir da confirma&ccedil;&atilde;o de pagamento dos impostos (se tributado) e do despacho postal. <a href="http://www.correios.com.br/encomendas-logistica/entrega/importacao/prazos-dos-servicos-internacionais-de-importacao" target="_blank">Tabela prazos de entrega</a></p> <p>Remessas iniciadas com o c&oacute;digo "UM" n&atilde;o s&atilde;o rastre&aacute;veis no Brasil. 
Esse c&oacute;digo &eacute; utilizado pelo pa&iacute;s de origem para indicar que a remessa &eacute; pass&iacute;vel de pagamento de imposto de importa&ccedil;&atilde;o no destino.</p> </div> <div class="ctrlcontent"> <!-- pode ser suspenso: PU524124388BR-true--> <div class="highlightSRO"> <!-- imagem: --> <div id="imagemhidden"></div> <div display:block><br /> <br /> <br /><br /> <script src="sistemas/rastreamento/js/qrcode.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.qrcode.js" type="text/javascript"></script> </div> </div> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">02/03/2021 <br/> 13:54 <br/> <label style="text-transform:capitalize;">SAO PAULO / SP</label> </td> <td class="sroLbEvent"> <strong>Objeto entregue ao destinatário</strong> <br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">02/03/2021 <br/> 08:59 <br/> <label style="text-transform:capitalize;">SAO PAULO / SP</label> </td> <td class="sroLbEvent"> <strong>Objeto saiu para entrega ao destinatário</strong> <br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">02/03/2021 <br/> 05:09 <br/> SAO PAULO / SP<br/> </td> <td class="sroLbEvent"> <strong>Objeto em trânsito - por favor aguarde</strong> <br/> de Unidade de Tratamento em SAO PAULO / SP para Unidade de Distribuição em SAO PAULO / SP<br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">01/03/2021 <br/> 17:57 <br/> SAO PAULO / SP<br/> </td> <td class="sroLbEvent"> <strong>Objeto em trânsito - por favor aguarde</strong> <br/> de Agência dos Correios em SAO PAULO / SP para Unidade de Tratamento em SAO PAULO / SP<br/> </td> </tr> </table> <table class="listEvent sro"> <tr> <td class="sroDtEvent" valign="top">01/03/2021 <br/> 17:27 <br/> <label style="text-transform:capitalize;">SAO PAULO / SP</label> </td> <td class="sroLbEvent"> <strong>Objeto postado</strong> 
<br/> </td> </tr> </table> <br /> <div id="imagem" style="display:none"> <div class="status"> <img src="home2014/img/SRO/" alt="Objeto entregue ao destinatário"> </div> </div> <script> document.getElementById('imagemhidden').innerHTML = document.getElementById('imagem').innerHTML; </script> <div class="btnform"> <p>Todos os objetos internacionais estão sujeitos à cobrança do despacho postal. Clique <a href="https://www.correios.com.br/encomendas-logistica/minhas-importacoes">aqui</a> para saber mais</p> <button class="btn2 float-left" onClick="location.href='https://www2.correios.com.br/sistemas/rastreamento/'">Nova Consulta</button> </div> <br /><br /> <div class="act-extras" style="display:block; width:430px"> <form name="frmprint" id="frmprint" method="post" action="sistemas/rastreamento/newprint.cfm" target="_blank"> <input type="hidden" name="objetos" value="PU524124388BR" /> </form> <a class="icon print sro" onClick="document.getElementById('frmprint').submit();"><img width="22px" height="22px" src="home2014/img/trans.gif">Imprimir</a> <!-- pode ser suspenso--> <!-- Não está logado, mas passivel de suspensão --> <a class="icon stop sro" onClick="document.getElementById('formTelaSusp').submit();"><img class="fechar" width="22px" height="22px" src="home2014/img/trans.gif">Suspender Entrega</a> <form name="formTelaSusp" id="formTelaSusp" method="post" action="sistemas/rastreamento/suspensaoEntrega/dsp/default.cfm"> <input type="hidden" name="ObjAsuspender" id="ObjAsuspender" value="PU524124388BR" /> </form> </div> <br /> <div class="destaque" style="background-color:##eee; width:98%; display:table; height:auto; padding:10px; margin-bottom:5px;"> <div id="qrcodeTable" style="float:left; width:100px"> <img src="https://chart.apis.google.com/chart?cht=qr&chl=PU524124388BR&chs=116x116" alt="PU524124388BR - QR code"/> </div> <div style="float:left; width:200px; padding:10px 13px; font-size:1.3em;"> Acesse o aplicativo dos Correios e leia o c&oacute;digo 2D ao lado. 
Voc&ecirc; n&atilde;o precisar&aacute; digitar o c&oacute;digo do objeto e poder&aacute; salvar na sua lista de favoritos. </div> <div style="float:left; width:100px; margin:3px;"> <a href="https://itunes.apple.com/us/app/sro-mobile/id998782060?l=pt&ls=1&mt=8" target="_blank"> <img src="home2014/img/sro/Apple-store.png" width="138" height="43" style="border:2px solid #eee"> </a> <a href="https://play.google.com/store/apps/details?id=br.com.correios.srocorreios" target="_blank"> <img src="home2014/img/sro/Google-store.png" width="138" height="43" style="border:2px solid #eee"> </a> </div> </div> <br /> <a href="https://www.correios.com.br/banner-sro/link-banner-sro" target="_blank"><img src="https://www.correios.com.br/banner-sro/banner_sro/" width="480px" height="94px" /></a> </div> <div class="modal"> <div id="inline_content2" class="inline_content"> <h3><br /> <br /> O status do objeto pesquisado est&aacute; finalizado.<br />N&atilde;o &eacute; poss&iacute;vel solicitar notifica&ccedil;&atilde;o por SMS.</h3> </div> <script src="sistemas/rastreamento/js/jquery.validationEngine.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.validationEngine-pt.js" type="text/javascript"></script> <script> //$(document).ready(function(){$("#formSms").validationEngine();}); </script> </div> <script type="text/javascript" src="sistemas/rastreamento/js/sro.js"></script> <script type="text/javascript" src="sistemas/rastreamento/js/jquery.maskedinput.min.js"></script> <script type="text/javascript" src="sistemas/rastreamento/js/MascaraValidacao.js"></script> <!-- TipoBD(01) --> </div> </div> <br class="clr" /> </div> </div> </div> </div><!-- class="wrap" --> <!-- laminas --> <div class="footer"> <div class="wrap"> <div class="column-footer"> <h3><a target="_blank" href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale Conosco</a></h3> <ul> <li class="node"><a href="sistemas/falecomoscorreios">Manifestação via Internet</a> <ul> <li><a 
href="sistemas/falecomoscorreios">Fale Conosco pelo site</a></li> </ul> </li> <li class="node"> <a href="sistemas/falecomoscorreios">Atendimento telef&ocirc;nico</a> <ul> <li>3003 0100 (Capitais e Regi&atilde;o Metropolitanas)</li> <li>0800 725 7282 (Demais localidades)</li> <li>0800 725 0100 (Sugest&otilde;es ou reclama&ccedil;&otilde;es)</li> <li>0800 725 0898 (exclusivo para portadores <br/> de deficiência auditiva) </li> <li>3003 1383 (Informações Banco Postal)</li> </ul> </li> <li class="node"> <a href="sistemas/agencias/">Rede de atendimento</a> <ul> <li><a href="sistemas/agencias">Consulte endereços e horários de atendimentos <br /> das agências dos Correios</a></li> </ul> </li> <li class="node"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/ouvidoria">Ouvidoria</a> </li> </ul> </div> <div class="column-footer"> <h3><a href="">Portal Correios</a></h3> <ul> <li><a href="http://www.correios.com.br/sitemap">Mapa do site</a></li> <li><a href="sistemas/rastreamento/default.cfm">Rastreamento de objetos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/sala-de-imprensa/fale-com-a-assessoria-de-comunicacao">Sala de Imprensa</a></li> <li><a href="institucional/concursos/correios/default.cfm">Concursos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/patrocinio/patrocinio">Patroc&iacute;nios</a></li> <li><a href="http://www.correios.com.br/para-voce/correios-de-a-a-z/contatos-comerciais">Contatos comerciais</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/a-empresa/carta-de-servicos-ao-cidadao">Carta de servi&ccedil;os ao cidad&atilde;o</a></li> <li><a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/denuncias/denuncias/">Denúncia</a></li> <li><a href="http://www.mc.gov.br/">Minist&eacute;rio das Comunica&ccedil;&otilde;es</a></li> </ul> </div> <div class="column-footer"> <h3><a href="">Outros sites dos Correios</a></h3> <ul> <li><a 
href="http://www.correios.com.br/para-voce">Correios para voc&ecirc;</a></li> <li><a href="http://www.correios.com.br/para-sua-empresa">Correios para sua empresa</a></li> <li><a href="http://www.correios.com.br/sobre-correios">Sobre Correios</a></li> <li><a href="http://shopping.correios.com.br/correiosonline">Loja virtual dos Correios</a></li> <li><a href="http://blog.correios.com.br/correios">Blog dos Correios</a></li> <li><a href="http://blog.correios.com.br/filatelia">Espa&ccedil;o da Filatelia</a></li> <li><a href="http://m.correios.com.br/">Correios Mobile</a></li> <li><a href="http://www2.correios.com.br/">Sistemas dos Correios</a></li> </ul> </div> <div class="copy"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/politica-de-privacidade-e-notas-legais/">Pol&iacute;tica de Privacidade e notas legais</a> - © Copyright 2018 Correios - Todos os direitos reservados. </div> </div> </div> </div> <!-- class="back" --> </body></html> """ @pytest.fixture def fake_invalid_html(): return """ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <html lang="pt-br"> <head> <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=1" /> <meta http-equiv="Content-Language" Content="pt"> <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <title>Resultado Rastreamento</title> <meta name="description" content="[page]" /> <meta name="keywords" content="" /> <!-- AppInternalsXpert BMX Integration Begin --> <script> if(!RVBD_EUE){ var RVBD_EUE={startJS:Number(new Date()), clientId:'',appId:1, collector:'apmperformance.correios.com.br', collectorHttpPort:80, collectorHttpsPort:443, sv:'0401', ajax:true, sync:true, ajaxResponseTime:true}; (function(){ var w=window,l=w.addEventListener,m=w.attachEvent, d=document,s='script',t='load',o=RVBD_EUE, r=(('https:'===d.location.protocol)? 
'https://apmperformance.correios.com.br:443': 'http://apmperformance.correios.com.br:80')+ '/jsi/riverbed_appinternals.d.'+ (o.ajax?'ajax.js':'js'),p=('onpagehide' in w),e=p?'pageshow':t, j=d.createElement(s),x=d.getElementsByTagName(s)[0], h=function(y){o.ldJS=o.ldJS||new Date();o.per=y?y.persisted:null;}, i=function(){o.ld=1;};o.cookie=d.cookie;d.cookie= '_op_aixPageId=0; path=/; expires='+(new Date(0)).toGMTString(); o.cookieAfterDelete=d.cookie;j.async=1;j.src=r; if(l){l(e,h,false);if(p){l(t,i,false);}}else if(m) {m('on'+e,h);if(p){m('on'+t,i);}} if(o.sync){d.write('<'+s+' src=\''+r+'\'></'+s+'>');} else{x.parentNode.insertBefore(j,x);} })();} </script> <!-- AppInternalsXpert BMX Integration End --> <script type="text/javascript"> pageid = '1C658B24-5056-9163-891FB9FC40735A16'; </script> <noscript> <p>identificador da página</p> </noscript> <base href="/"> <link rel="icon" href="home2014/img/icon.png" type="image/gif"/> <!-- CSS --> <link href="home2014/css/layout.css" rel="stylesheet" type="text/css" /> <!-- /CSS ---> </head> <!--[if lt IE 7]> <body class="ie6"> <![endif]--> <!--[if IE 7]> <body class="ie7"> <![endif]--> <!--[if IE 8]> <body class="ie8"> <![endif]--> <!--[if !IE]>--> <body> <!--<![endif]--> <div class="back"> <div class="acessibilidade"> <div class="wrap"> <ul> <li class="tocontent"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" onClick="document.location.hash='ancora';" title="Ir para o conte&uacute;do principal da p&aacute;gina">Ir ao conte&uacute;do</a> </li> <li class="font-plus" id="biggerFont"> <a href="javascript:void(0);" title="Aumentar o tamanho da fonte do texto">A</a> </li> <li id="defaultFont"> <span class="separator-dot">&nbsp;</span> <a href="javascript:void(0);" title="Retornar a fonte do texto para tamanho padr&atilde;o">Tamanho padr&atilde;o</a> <span class="separator-dot">&nbsp;</span> </li> <li class="font-minus" id="smallerFont"> <a href="javascript:void(0);" title="Diminuir o tamanho da fonte do 
texto">A</a> </li> <li class="contrast" id="contrast"> <span class="separator-bar">&nbsp;</span> <a href="javascript:void(0);" title="Modificar para o modo de alto contraste">Contraste</a> <span class="separator-bar">&nbsp;</span> </li> <li> <span class="separator-bar">&nbsp;</span> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/">Teclado Virtual</a> </li> <li> <a href="http://www.correios.com.br/sobre-correios/sustentabilidade/vertente-social/headmouse-e-teclado-virtual/" id="txt-headmouse">Headmouse</a></li> </ul> </div> </div> <!-- header ---> <div class="header"> <h1 class="logo float-left"> <a href="http://www.correios.com.br" title="Ir para a página incial" alt="Logo Correios"><img src="home2014/img/layout/logo.png" alt="Ir para a página inicial" title="Ir para a página incial"/></a> </h1> <div class="acesso_rapido"> <div class="text-right"> <a href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale com os Correios</a><br/> </div> <div class="produtosaz float-right"> <div class="expo">Outros sites</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Acesse a outros sites dos Correios</span><br/> <a href="http://www.correios.com.br/para-voce"><b>Correios para você</b></a><br/> <a href="http://www.correios.com.br/para-sua-empresa"><b>Correios para sua empresa</b></a><br/> <a href="http://www.correios.com.br/sobre-correios"><b>Sobre Correios</b></a><br/> <a href="produtosaz/default.cfm?filtro=R/Z"><b>Espaço da Filatelia</b></a><br/> <a href="http://blog.correios.com.br/correios"><b>Blog Institucional do Correios</b></a><br/> <a href="http://m.correios.com.br"><b>Correios mobile</b></a><br/> </div> </span> </div> <div class="produtosaz float-right"> <div class="expo" >Correios de A a Z</div> <span class="dados abaaz"> <div class="dadosaz"> <span class="dica"> Escolha pela letra inicial dos nossos produtos, serviços e assuntos.<br/> </span><br/> <a 
href="http://www.correios.com.br/a-a-z/"><b>Todos os itens</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=abc"><b>Correios de A-C</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=def"><b>Correios de D-F</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=ghijklmnopq"><b>Correios de G-Q</b></a><br/> <a href="http://www.correios.com.br/a-a-z/?filtro=rstuvwxyz"><b>Correios de R-Z</b></a> </div> </span> </div> </div> <!-- acesso rápido --> <br class="clr"/> </div> <div class="tabs"> <div class="wrap"> <ul class="tabs-list"> <li class="current"> <a href="default.cfm"> <h2>Sistemas</h2> </a> </li> </ul> </div> <!-- /header ---> <div class="wrap"> <div class="content"> <div class="laminas" style="display: block;"> <div class="column1"> <span class="mver"> <span class="dominio"></span> <h3>Rastreamento</h3> <ul> <li><a href="sistemas/rastreamento/default.cfm" target="_self" >Rastreamento de objetos</a></li> <li><a href="http://globaltracktrace.ptc.post/gtt.web/Search.aspx" target="_self" >Rastreamento de objetos em outros países</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto" target="_self" >Como rastrear um objeto</a></li> <li><a href="https://www.correios.com.br/precisa-de-ajuda/como-rastrear-um-objeto/siglas-utilizadas-no-rastreamento-de-objeto" target="_self" >Siglas utilizadas no rastreamento de objetos</a></li> </ul> </span> <!-- Fim Menu vertical --> </div> <!-- column1 --> <div class="column2"> <div class="breadcrumb"></div> <div class="content "> <a name="ancora"></a> <div class="tituloimagem"> <h3><span class="codSro"><span>PU</span><span>524</span><span>124</span><span>388</span><span>BR</span></h3> </div> <div class="msg"> </div> <div class="quadroavisos"> SEDEX 12 e do SEDEX Hoje, representa o hor&aacute;rio real da entrega.</p><p>As informa&ccedil;&otilde;es de rastro de objetos registrados ficar&atilde;o dispon&iacute;veis at&eacute; 180 dias ap&oacute;s a data de 
postagem.</p><h4>Objetos com origem ou destino fora do Brasil</h4><p>O rastreamento para objetos postados no Brasil com c&oacute;digo iniciado por "R" e "C" e terminado com "BR" n&atilde;o &eacute; garantido fora do territ&oacute;rio brasileiro.</p><p>Para esses objetos, os operadores postais de outros pa&iacute;ses podem n&atilde;o disponibilizar e/ou transmitir informa&ccedil;&atilde;o de rastreamento para o Brasil.</p><p>Sendo assim, consultas de rastreamento de objetos podem tamb&eacute;m ser realizadas nos sites dos operadores de destino dispon&iacute;veis no site da UPU - Uni&atilde;o Postal Universal.</p><p><b>Para os objetos postados no Exterior para o Brasil</b>, o servi&ccedil;o contratado pelo remetente na origem determina o n&iacute;vel de informa&ccedil;&atilde;o de rastreamento de objetos em nosso site.</p> <p>Objetos registrados recebidos do exterior que apresentam código iniciado por "R" não pertencem &agrave; modalidade expressa, portanto não h&aacute; rastreamento ponto a ponto. As informa&ccedil;&otilde;es no sistema de rastreamento para esses objetos "R" incluem apenas os eventos: "recebimento no Brasil", "entrega", "tentativa de entrega" ou "aguardando retirada na unidade responsável". No caso do objeto ser tributado, haver&aacute; os eventos de "encaminhamento para fiscaliza&ccedil;&atilde;o e tributa&ccedil;&atilde;o e "sa&iacute;da da fiscaliza&ccedil;&atilde;o". </p> <p>O prazo estimado de entrega dos objetos registrados &eacute; de 40 DIAS &Uacute;TEIS a partir da confirma&ccedil;&atilde;o de pagamento dos impostos (se tributado) e do despacho postal. <a href="http://www.correios.com.br/encomendas-logistica/entrega/importacao/prazos-dos-servicos-internacionais-de-importacao" target="_blank">Tabela prazos de entrega</a></p> <p>Remessas iniciadas com o c&oacute;digo "UM" n&atilde;o s&atilde;o rastre&aacute;veis no Brasil. 
Esse c&oacute;digo &eacute; utilizado pelo pa&iacute;s de origem para indicar que a remessa &eacute; pass&iacute;vel de pagamento de imposto de importa&ccedil;&atilde;o no destino.</p> </div> <div class="ctrlcontent"> <!-- pode ser suspenso: PU524124388BR-true--> <div class="highlightSRO"> <!-- imagem: --> <div id="imagemhidden"></div> <div display:block><br /> <br /> <br /><br /> <script src="sistemas/rastreamento/js/qrcode.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.qrcode.js" type="text/javascript"></script> </div> </div> <br /> <script> document.getElementById('imagemhidden').innerHTML = document.getElementById('imagem').innerHTML; </script> <div class="btnform"> <p>Todos os objetos internacionais estão sujeitos à cobrança do despacho postal. Clique <a href="https://www.correios.com.br/encomendas-logistica/minhas-importacoes">aqui</a> para saber mais</p> <button class="btn2 float-left" onClick="location.href='https://www2.correios.com.br/sistemas/rastreamento/'">Nova Consulta</button> </div> <br /><br /> <div class="act-extras" style="display:block; width:430px"> <form name="frmprint" id="frmprint" method="post" action="sistemas/rastreamento/newprint.cfm" target="_blank"> <input type="hidden" name="objetos" value="PU524124388BR" /> </form> <a class="icon print sro" onClick="document.getElementById('frmprint').submit();"><img width="22px" height="22px" src="home2014/img/trans.gif">Imprimir</a> <!-- pode ser suspenso--> <!-- Não está logado, mas passivel de suspensão --> <a class="icon stop sro" onClick="document.getElementById('formTelaSusp').submit();"><img class="fechar" width="22px" height="22px" src="home2014/img/trans.gif">Suspender Entrega</a> <form name="formTelaSusp" id="formTelaSusp" method="post" action="sistemas/rastreamento/suspensaoEntrega/dsp/default.cfm"> <input type="hidden" name="ObjAsuspender" id="ObjAsuspender" value="PU524124388BR" /> </form> </div> <br /> <div class="destaque" 
style="background-color:##eee; width:98%; display:table; height:auto; padding:10px; margin-bottom:5px;"> <div id="qrcodeTable" style="float:left; width:100px"> <img src="https://chart.apis.google.com/chart?cht=qr&chl=PU524124388BR&chs=116x116" alt="PU524124388BR - QR code"/> </div> <div style="float:left; width:200px; padding:10px 13px; font-size:1.3em;"> Acesse o aplicativo dos Correios e leia o c&oacute;digo 2D ao lado. Voc&ecirc; n&atilde;o precisar&aacute; digitar o c&oacute;digo do objeto e poder&aacute; salvar na sua lista de favoritos. </div> <div style="float:left; width:100px; margin:3px;"> <a href="https://itunes.apple.com/us/app/sro-mobile/id998782060?l=pt&ls=1&mt=8" target="_blank"> <img src="home2014/img/sro/Apple-store.png" width="138" height="43" style="border:2px solid #eee"> </a> <a href="https://play.google.com/store/apps/details?id=br.com.correios.srocorreios" target="_blank"> <img src="home2014/img/sro/Google-store.png" width="138" height="43" style="border:2px solid #eee"> </a> </div> </div> <br /> <a href="https://www.correios.com.br/banner-sro/link-banner-sro" target="_blank"><img src="https://www.correios.com.br/banner-sro/banner_sro/" width="480px" height="94px" /></a> </div> <div class="modal"> <div id="inline_content2" class="inline_content"> <h3><br /> <br /> O status do objeto pesquisado est&aacute; finalizado.<br />N&atilde;o &eacute; poss&iacute;vel solicitar notifica&ccedil;&atilde;o por SMS.</h3> </div> <script src="sistemas/rastreamento/js/jquery.validationEngine.js" type="text/javascript"></script> <script src="sistemas/rastreamento/js/jquery.validationEngine-pt.js" type="text/javascript"></script> <script> //$(document).ready(function(){$("#formSms").validationEngine();}); </script> </div> <script type="text/javascript" src="sistemas/rastreamento/js/sro.js"></script> <script type="text/javascript" src="sistemas/rastreamento/js/jquery.maskedinput.min.js"></script> <script type="text/javascript" 
src="sistemas/rastreamento/js/MascaraValidacao.js"></script> <!-- TipoBD(01) --> </div> </div> <br class="clr" /> </div> </div> </div> </div><!-- class="wrap" --> <!-- laminas --> <div class="footer"> <div class="wrap"> <div class="column-footer"> <h3><a target="_blank" href="http://apps2.correios.com.br/faleconosco/app/index.php">Fale Conosco</a></h3> <ul> <li class="node"><a href="sistemas/falecomoscorreios">Manifestação via Internet</a> <ul> <li><a href="sistemas/falecomoscorreios">Fale Conosco pelo site</a></li> </ul> </li> <li class="node"> <a href="sistemas/falecomoscorreios">Atendimento telef&ocirc;nico</a> <ul> <li>3003 0100 (Capitais e Regi&atilde;o Metropolitanas)</li> <li>0800 725 7282 (Demais localidades)</li> <li>0800 725 0100 (Sugest&otilde;es ou reclama&ccedil;&otilde;es)</li> <li>0800 725 0898 (exclusivo para portadores <br/> de deficiência auditiva) </li> <li>3003 1383 (Informações Banco Postal)</li> </ul> </li> <li class="node"> <a href="sistemas/agencias/">Rede de atendimento</a> <ul> <li><a href="sistemas/agencias">Consulte endereços e horários de atendimentos <br /> das agências dos Correios</a></li> </ul> </li> <li class="node"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/ouvidoria">Ouvidoria</a> </li> </ul> </div> <div class="column-footer"> <h3><a href="">Portal Correios</a></h3> <ul> <li><a href="http://www.correios.com.br/sitemap">Mapa do site</a></li> <li><a href="sistemas/rastreamento/default.cfm">Rastreamento de objetos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/sala-de-imprensa/fale-com-a-assessoria-de-comunicacao">Sala de Imprensa</a></li> <li><a href="institucional/concursos/correios/default.cfm">Concursos</a></li> <li><a href="http://www.correios.com.br/correios/sobre-correios/patrocinio/patrocinio">Patroc&iacute;nios</a></li> <li><a href="http://www.correios.com.br/para-voce/correios-de-a-a-z/contatos-comerciais">Contatos comerciais</a></li> <li><a 
href="http://www.correios.com.br/correios/sobre-correios/a-empresa/carta-de-servicos-ao-cidadao">Carta de servi&ccedil;os ao cidad&atilde;o</a></li> <li><a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/denuncias/denuncias/">Denúncia</a></li> <li><a href="http://www.mc.gov.br/">Minist&eacute;rio das Comunica&ccedil;&otilde;es</a></li> </ul> </div> <div class="column-footer"> <h3><a href="">Outros sites dos Correios</a></h3> <ul> <li><a href="http://www.correios.com.br/para-voce">Correios para voc&ecirc;</a></li> <li><a href="http://www.correios.com.br/para-sua-empresa">Correios para sua empresa</a></li> <li><a href="http://www.correios.com.br/sobre-correios">Sobre Correios</a></li> <li><a href="http://shopping.correios.com.br/correiosonline">Loja virtual dos Correios</a></li> <li><a href="http://blog.correios.com.br/correios">Blog dos Correios</a></li> <li><a href="http://blog.correios.com.br/filatelia">Espa&ccedil;o da Filatelia</a></li> <li><a href="http://m.correios.com.br/">Correios Mobile</a></li> <li><a href="http://www2.correios.com.br/">Sistemas dos Correios</a></li> </ul> </div> <div class="copy"> <a href="http://www.correios.com.br/sobre-correios/fale-com-os-correios/politica-de-privacidade-e-notas-legais/">Pol&iacute;tica de Privacidade e notas legais</a> - © Copyright 2018 Correios - Todos os direitos reservados. 
</div> </div> </div> </div> </body></html> """ @pytest.fixture def tracking_events_dict( tracking_code ): return [ {'code': tracking_code, 'date': '02/03/2021 13:54', 'location': 'SAO PAULO / SP', 'description': 'Objeto entregue ao destinatário'}, {'code': tracking_code, 'date': '02/03/2021 08:59', 'location': 'SAO PAULO / SP', 'description': 'Objeto saiu para entrega ao destinatário'}, {'code': tracking_code, 'date': '02/03/2021 05:09', 'location': 'SAO PAULO / SP', 'description': 'Objeto em trânsito - por favor aguarde de Unidade de Tratamento em SAO PAULO / SP para Unidade de Distribuição em SAO PAULO / SP'}, {'code': tracking_code, 'date': '01/03/2021 17:57', 'location': 'SAO PAULO / SP', 'description': 'Objeto em trânsito - por favor aguarde de Agência dos Correios em SAO PAULO / SP para Unidade de Tratamento em SAO PAULO / SP'}, {'code': tracking_code, 'date': '01/03/2021 17:27', 'location': 'SAO PAULO / SP', 'description': 'Objeto postado'} ] @pytest.fixture def tracking_events_list( tracking_code ): return [ TrackingEvent(tracking_code, '02/03/2021 13:54', 'SAO PAULO / SP', 'Objeto entregue ao destinatário'), TrackingEvent(tracking_code, '02/03/2021 08:59', 'SAO PAULO / SP', 'Objeto saiu para entrega ao destinatário'), TrackingEvent(tracking_code, '02/03/2021 05:09', 'SAO PAULO / SP', 'Objeto em trânsito - por favor aguarde de Unidade de Tratamento em SAO PAULO / SP para Unidade de Distribuição em SAO PAULO / SP'), TrackingEvent(tracking_code, '01/03/2021 17:57', 'SAO PAULO / SP', 'Objeto em trânsito - por favor aguarde de Agência dos Correios em SAO PAULO / SP para Unidade de Tratamento em SAO PAULO / SP'), TrackingEvent(tracking_code, '01/03/2021 17:27', 'SAO PAULO / SP', 'Objeto postado') ] @pytest.fixture def tracking_code(): return 'ON769530126BR'
true
true
1c354b08bcf0fd5ba912adfd753b709ddd75dd45
21,566
py
Python
tests/text_quotations_test.py
joelthe1/talon
18de8bc35d8457a5ddfae1e52b9ee995422b4884
[ "Apache-2.0" ]
null
null
null
tests/text_quotations_test.py
joelthe1/talon
18de8bc35d8457a5ddfae1e52b9ee995422b4884
[ "Apache-2.0" ]
null
null
null
tests/text_quotations_test.py
joelthe1/talon
18de8bc35d8457a5ddfae1e52b9ee995422b4884
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import from tests.fixtures import STANDARD_REPLIES from talon import quotations from six.moves import range from nose.tools import eq_ from mock import patch import email.iterators import six import os @patch.object(quotations, 'MAX_LINES_COUNT', 1) def test_too_many_lines(): msg_body = """Test reply Hi -----Original Message----- Test""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_somebody_wrote(): msg_body = """Test reply On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote: > > Test > > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_polymail(): msg_body = """Test reply On Tue, Apr 11, 2017 at 10:07 PM John Smith < mailto:John Smith <johnsmith@gmail.com> > wrote: Test quoted data """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_sent_from_samsung_smb_wrote(): msg_body = """Test reply Sent from Samsung MobileName <address@example.com> wrote: > > Test > > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_wrote_somebody(): eq_('Lorem', quotations.extract_from_plain( """Lorem Op 13-02-2014 3:18 schreef Julius Caesar <pantheon@rome.com>: Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse. """)) def test_pattern_on_date_somebody_wrote_date_with_slashes(): msg_body = """Test reply On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > > Test. 
> > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_date_time_email_splitter(): msg_body = """Test reply 2014-10-17 11:28 GMT+03:00 Postmaster < postmaster@sandboxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mailgun.org>: > First from site > """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_somebody_wrote_allows_space_in_front(): msg_body = """Thanks Thanmai On Mar 8, 2012 9:59 AM, "Example.com" < r+7f1b094ceb90e18cca93d53d3703feae@example.com> wrote: >** > Blah-blah-blah""" eq_("Thanks Thanmai", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_somebody_sent(): msg_body = """Test reply On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> sent: > > Test > > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_appointment(): msg_body = """Response 10/19/2017 @ 9:30 am for physical therapy Bla 1517 4th Avenue Ste 300 London CA 19129, 555-421-6780 John Doe, FCLS Mailgun Inc 555-941-0697 From: from@example.com [mailto:from@example.com] Sent: Wednesday, October 18, 2017 2:05 PM To: John Doer - SIU <jd@example.com> Subject: RE: Claim # 5551188-1 Text""" expected = """Response 10/19/2017 @ 9:30 am for physical therapy Bla 1517 4th Avenue Ste 300 London CA 19129, 555-421-6780 John Doe, FCLS Mailgun Inc 555-941-0697""" eq_(expected, quotations.extract_from_plain(msg_body)) def test_line_starts_with_on(): msg_body = """Blah-blah-blah On blah-blah-blah""" eq_(msg_body, quotations.extract_from_plain(msg_body)) def test_reply_and_quotation_splitter_share_line(): # reply lines and 'On <date> <person> wrote:' splitter pattern # are on the same line msg_body = """reply On Wed, Apr 4, 2012 at 3:59 PM, bob@example.com wrote: > Hi""" eq_('reply', quotations.extract_from_plain(msg_body)) # test pattern '--- On <date> <person> wrote:' with reply text on # the same line msg_body = """reply--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote: > Hi""" eq_('reply', 
quotations.extract_from_plain(msg_body)) # test pattern '--- On <date> <person> wrote:' with reply text containing # '-' symbol msg_body = """reply bla-bla - bla--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote: > Hi""" reply = """reply bla-bla - bla""" eq_(reply, quotations.extract_from_plain(msg_body)) def _check_pattern_original_message(original_message_indicator): msg_body = u"""Test reply -----{}----- Test""" eq_('Test reply', quotations.extract_from_plain( msg_body.format(six.text_type(original_message_indicator)))) def test_english_original_message(): _check_pattern_original_message('Original Message') _check_pattern_original_message('Reply Message') def test_german_original_message(): _check_pattern_original_message(u'Ursprüngliche Nachricht') _check_pattern_original_message('Antwort Nachricht') def test_danish_original_message(): _check_pattern_original_message('Oprindelig meddelelse') def test_reply_after_quotations(): msg_body = """On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > > Test Test reply""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_android_wrote(): msg_body = """Test reply ---- John Smith wrote ---- > quoted > text """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_reply_wraps_quotations(): msg_body = """Test reply On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > > Test Regards, Roman""" reply = """Test reply Regards, Roman""" eq_(reply, quotations.extract_from_plain(msg_body)) def test_reply_wraps_nested_quotations(): msg_body = """Test reply On 04/19/2011 07:10 AM, Roman Tkachenko wrote: >Test test >On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > >> >> Test. >> >> Roman Regards, Roman""" reply = """Test reply Regards, Roman""" eq_(reply, quotations.extract_from_plain(msg_body)) def test_quotation_separator_takes_2_lines(): msg_body = """Test reply On Fri, May 6, 2011 at 6:03 PM, Roman Tkachenko from Hacker News <roman@definebox.com> wrote: > Test. 
> > Roman Regards, Roman""" reply = """Test reply Regards, Roman""" eq_(reply, quotations.extract_from_plain(msg_body)) def test_quotation_separator_takes_3_lines(): msg_body = """Test reply On Nov 30, 2011, at 12:47 PM, Somebody < 416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4@somebody.domain.com> wrote: Test message """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_short_quotation(): msg_body = """Hi On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > Hello""" eq_("Hi", quotations.extract_from_plain(msg_body)) def test_with_indent(): msg_body = """YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin. ------On 12/29/1987 17:32 PM, Julius Caesar wrote----- Brunch mumblecore pug Marfa tofu, irure taxidermy hoodie readymade pariatur. """ eq_("YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin.", quotations.extract_from_plain(msg_body)) def test_short_quotation_with_newline(): msg_body = """Btw blah blah... On Tue, Jan 27, 2015 at 12:42 PM -0800, "Company" <christine.XXX@XXX.com> wrote: Hi Mark, Blah blah?  Thanks,Christine  On Jan 27, 2015, at 11:55 AM, Mark XXX <mark@XXX.com> wrote: Lorem ipsum? Mark Sent from Acompli""" eq_("Btw blah blah...", quotations.extract_from_plain(msg_body)) def test_pattern_date_email_with_unicode(): msg_body = """Replying ok 2011/4/7 Nathan \xd0\xb8ova <support@example.com> > Cool beans, scro""" eq_("Replying ok", quotations.extract_from_plain(msg_body)) def test_english_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain("""Allo! Follow up MIME! From: somebody@example.com Sent: March-19-11 5:42 PM To: Somebody Subject: The manager has commented on your Loop Blah-blah-blah """)) def test_german_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain( """Allo! Follow up MIME! Von: somebody@example.com Gesendet: Dienstag, 25. 
November 2014 14:59 An: Somebody Betreff: The manager has commented on your Loop Blah-blah-blah """)) def test_french_multiline_from_block(): eq_('Lorem ipsum', quotations.extract_from_plain( u"""Lorem ipsum De : Brendan xxx [mailto:brendan.xxx@xxx.com] Envoyé : vendredi 23 janvier 2015 16:39 À : Camille XXX Objet : Follow Up Blah-blah-blah """)) def test_french_from_block(): eq_('Lorem ipsum', quotations.extract_from_plain( u"""Lorem ipsum Le 23 janv. 2015 à 22:03, Brendan xxx <brendan.xxx@xxx.com<mailto:brendan.xxx@xxx.com>> a écrit: Bonjour!""")) def test_polish_from_block(): eq_('Lorem ipsum', quotations.extract_from_plain( u"""Lorem ipsum W dniu 28 stycznia 2015 01:53 użytkownik Zoe xxx <zoe.xxx@xxx.com> napisał: Blah! """)) def test_danish_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain( """Allo! Follow up MIME! Fra: somebody@example.com Sendt: 19. march 2011 12:10 Til: Somebody Emne: The manager has commented on your Loop Blah-blah-blah """)) def test_swedish_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain( u"""Allo! Follow up MIME! Från: Anno Sportel [mailto:anno.spoel@hsbcssad.com] Skickat: den 26 augusti 2015 14:45 Till: Isacson Leiff Ämne: RE: Week 36 Blah-blah-blah """)) def test_swedish_from_line(): eq_('Lorem', quotations.extract_from_plain( """Lorem Den 14 september, 2015 02:23:18, Valentino Rudy (valentino@rudy.be) skrev: Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse. """)) def test_norwegian_from_line(): eq_('Lorem', quotations.extract_from_plain( u"""Lorem På 14 september 2015 på 02:23:18, Valentino Rudy (valentino@rudy.be) skrev: Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse. 
""")) def test_dutch_from_block(): eq_('Gluten-free culpa lo-fi et nesciunt nostrud.', quotations.extract_from_plain( """Gluten-free culpa lo-fi et nesciunt nostrud. Op 17-feb.-2015, om 13:18 heeft Julius Caesar <pantheon@rome.com> het volgende geschreven: Small batch beard laboris tempor, non listicle hella Tumblr heirloom. """)) def test_vietnamese_from_block(): eq_('Hello', quotations.extract_from_plain( u"""Hello Vào 14:24 8 tháng 6, 2017, Hùng Nguyễn <hungnguyen@xxx.com> đã viết: > Xin chào """)) def test_quotation_marker_false_positive(): msg_body = """Visit us now for assistance... >>> >>> http://www.domain.com <<< Visit our site by clicking the link above""" eq_(msg_body, quotations.extract_from_plain(msg_body)) def test_link_closed_with_quotation_marker_on_new_line(): msg_body = '''8.45am-1pm From: somebody@example.com Date: Wed, 16 May 2012 00:15:02 -0600 <http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT > <bob@example.com <mailto:bob@example.com> > Requester: ''' eq_('8.45am-1pm', quotations.extract_from_plain(msg_body)) def test_link_breaks_quotation_markers_sequence(): # link starts and ends on the same line msg_body = """Blah On Thursday, October 25, 2012 at 3:03 PM, life is short. on Bob wrote: > > Post a response by replying to this email > (http://example.com/c/YzOTYzMmE) > > life is short. 
(http://example.com/c/YzMmE) > """ eq_("Blah", quotations.extract_from_plain(msg_body)) # link starts after some text on one line and ends on another msg_body = """Blah On Monday, 24 September, 2012 at 3:46 PM, bob wrote: > [Ticket #50] test from bob > > View ticket (http://example.com/action _nonce=3dd518) > """ eq_("Blah", quotations.extract_from_plain(msg_body)) def test_from_block_starts_with_date(): msg_body = """Blah Date: Wed, 16 May 2012 00:15:02 -0600 To: klizhentas@example.com """ eq_('Blah', quotations.extract_from_plain(msg_body)) def test_bold_from_block(): msg_body = """Hi *From:* bob@example.com [mailto: bob@example.com] *Sent:* Wednesday, June 27, 2012 3:05 PM *To:* travis@example.com *Subject:* Hello """ eq_("Hi", quotations.extract_from_plain(msg_body)) def test_weird_date_format_in_date_block(): msg_body = """Blah Date: Fri=2C 28 Sep 2012 10:55:48 +0000 From: tickets@example.com To: bob@example.com Subject: [Ticket #8] Test """ eq_('Blah', quotations.extract_from_plain(msg_body)) def test_dont_parse_quotations_for_forwarded_messages(): msg_body = """FYI ---------- Forwarded message ---------- From: bob@example.com Date: Tue, Sep 4, 2012 at 1:35 PM Subject: Two line subject To: rob@example.com Text""" eq_(msg_body, quotations.extract_from_plain(msg_body)) def test_forwarded_message_in_quotations(): msg_body = """Blah -----Original Message----- FYI ---------- Forwarded message ---------- From: bob@example.com Date: Tue, Sep 4, 2012 at 1:35 PM Subject: Two line subject To: rob@example.com """ eq_("Blah", quotations.extract_from_plain(msg_body)) def test_mark_message_lines(): # e - empty line # s - splitter line # m - line starting with quotation marker '>' # t - the rest lines = ['Hello', '', # next line should be marked as splitter '_____________', 'From: foo@bar.com', 'Date: Wed, 16 May 2012 00:15:02 -0600', '', '> Hi', '', 'Signature'] eq_('tesssemet', quotations.mark_message_lines(lines)) lines = ['Just testing the email reply', '', 'Robert J 
Samson', 'Sent from my iPhone', '', # all 3 next lines should be marked as splitters 'On Nov 30, 2011, at 12:47 PM, Skapture <', ('416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4' '@skapture-staging.mailgun.org>'), 'wrote:', '', 'Tarmo Lehtpuu has posted the following message on'] eq_('tettessset', quotations.mark_message_lines(lines)) def test_process_marked_lines(): # quotations and last message lines are mixed # consider all to be a last message markers = 'tsemmtetm' lines = [str(i) for i in range(len(markers))] lines = [str(i) for i in range(len(markers))] eq_(lines, quotations.process_marked_lines(lines, markers)) # no splitter => no markers markers = 'tmm' lines = ['1', '2', '3'] eq_(['1', '2', '3'], quotations.process_marked_lines(lines, markers)) # text after splitter without markers is quotation markers = 'tst' lines = ['1', '2', '3'] eq_(['1'], quotations.process_marked_lines(lines, markers)) # message + quotation + signature markers = 'tsmt' lines = ['1', '2', '3', '4'] eq_(['1', '4'], quotations.process_marked_lines(lines, markers)) # message + <quotation without markers> + nested quotation markers = 'tstsmt' lines = ['1', '2', '3', '4', '5', '6'] eq_(['1'], quotations.process_marked_lines(lines, markers)) # test links wrapped with paranthesis # link starts on the marker line markers = 'tsmttem' lines = ['text', 'splitter', '>View (http://example.com', '/abc', ')', '', '> quote'] eq_(lines[:1], quotations.process_marked_lines(lines, markers)) # link starts on the new line markers = 'tmmmtm' lines = ['text', '>' '>', '>', '(http://example.com) > ', '> life is short. 
(http://example.com) ' ] eq_(lines[:1], quotations.process_marked_lines(lines, markers)) # check all "inline" replies markers = 'tsmtmtm' lines = ['text', 'splitter', '>', '(http://example.com)', '>', 'inline reply', '>'] eq_(lines, quotations.process_marked_lines(lines, markers)) # inline reply with link not wrapped in paranthesis markers = 'tsmtm' lines = ['text', 'splitter', '>', 'inline reply with link http://example.com', '>'] eq_(lines, quotations.process_marked_lines(lines, markers)) # inline reply with link wrapped in paranthesis markers = 'tsmtm' lines = ['text', 'splitter', '>', 'inline reply (http://example.com)', '>'] eq_(lines, quotations.process_marked_lines(lines, markers)) def test_preprocess(): msg = ('Hello\n' 'See <http://google.com\n' '> for more\n' 'information On Nov 30, 2011, at 12:47 PM, Somebody <\n' '416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n' '@example.com>' 'wrote:\n' '\n' '> Hi') # test the link is rewritten # 'On <date> <person> wrote:' pattern starts from a new line prepared_msg = ('Hello\n' 'See @@http://google.com\n' '@@ for more\n' 'information\n' ' On Nov 30, 2011, at 12:47 PM, Somebody <\n' '416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n' '@example.com>' 'wrote:\n' '\n' '> Hi') eq_(prepared_msg, quotations.preprocess(msg, '\n')) msg = """ > <http://teemcl.mailgun.org/u/**aD1mZmZiNGU5ODQwMDNkZWZlMTExNm** > MxNjQ4Y2RmOTNlMCZyPXNlcmdleS5v**YnlraG92JTQwbWFpbGd1bmhxLmNvbS** > Z0PSUyQSZkPWUwY2U<http://example.org/u/aD1mZmZiNGU5ODQwMDNkZWZlMTExNmMxNjQ4Y> """ eq_(msg, quotations.preprocess(msg, '\n')) # 'On <date> <person> wrote' shouldn't be spread across too many lines msg = ('Hello\n' 'How are you? 
On Nov 30, 2011, at 12:47 PM,\n ' 'Example <\n' '416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n' '@example.org>' 'wrote:\n' '\n' '> Hi') eq_(msg, quotations.preprocess(msg, '\n')) msg = ('Hello On Nov 30, smb wrote:\n' 'Hi\n' 'On Nov 29, smb wrote:\n' 'hi') prepared_msg = ('Hello\n' ' On Nov 30, smb wrote:\n' 'Hi\n' 'On Nov 29, smb wrote:\n' 'hi') eq_(prepared_msg, quotations.preprocess(msg, '\n')) def test_preprocess_postprocess_2_links(): msg_body = "<http://link1> <http://link2>" eq_(msg_body, quotations.extract_from_plain(msg_body)) def body_iterator(msg, decode=False): for subpart in msg.walk(): payload = subpart.get_payload(decode=decode) if isinstance(payload, six.text_type): yield payload else: yield payload.decode('utf8') def test_standard_replies(): for filename in os.listdir(STANDARD_REPLIES): filename = os.path.join(STANDARD_REPLIES, filename) if not filename.endswith('.eml') or os.path.isdir(filename): continue with open(filename) as f: message = email.message_from_file(f) body = next(email.iterators.typed_subpart_iterator(message, subtype='plain')) text = ''.join(body_iterator(body, True)) stripped_text = quotations.extract_from_plain(text) reply_text_fn = filename[:-4] + '_reply_text' if os.path.isfile(reply_text_fn): with open(reply_text_fn) as f: reply_text = f.read().strip() else: reply_text = 'Hello' yield eq_, reply_text, stripped_text, \ "'%(reply)s' != %(stripped)s for %(fn)s" % \ {'reply': reply_text, 'stripped': stripped_text, 'fn': filename} def test_split_email(): msg = """From: Mr. X Date: 24 February 2016 To: Mr. Y Subject: Hi Attachments: none Goodbye. From: Mr. Y To: Mr. X Date: 24 February 2016 Subject: Hi Attachments: none Hello. On 24th February 2016 at 09.32am, Conal wrote: Hey! On Mon, 2016-10-03 at 09:45 -0600, Stangel, Dan wrote: > Mohan, > > We have not yet migrated the systems. 
> > Dan > > > -----Original Message----- > > Date: Mon, 2 Apr 2012 17:44:22 +0400 > > Subject: Test > > From: bob@xxx.mailgun.org > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com > > > > Hi > > > > > From: bob@xxx.mailgun.org > > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com > > > Date: Mon, 2 Apr 2012 17:44:22 +0400 > > > Subject: Test > > > Hi > > > > > > > """ expected_markers = "stttttsttttetesetesmmmmmmsmmmmmmmmmmmmmmmm" markers = quotations.split_emails(msg) eq_(markers, expected_markers) def test_feedback_below_left_unparsed(): msg_body = """Please enter your feedback below. Thank you. ------------------------------------- Enter Feedback Below ------------------------------------- The user experience was unparallelled. Please continue production. I'm sending payment to ensure that this line is intact.""" parsed = quotations.extract_from_plain(msg_body) eq_(msg_body, parsed.decode('utf8')) def test_appointment(): msg_body = """Invitation for an interview: Date: Wednesday 3, October 2011 Time: 7 : 00am Address: 130 Fox St Please bring in your ID.""" parsed = quotations.extract_from_plain(msg_body) eq_(msg_body, parsed.decode('utf8'))
25.135198
142
0.648011
from __future__ import absolute_import from tests.fixtures import STANDARD_REPLIES from talon import quotations from six.moves import range from nose.tools import eq_ from mock import patch import email.iterators import six import os @patch.object(quotations, 'MAX_LINES_COUNT', 1) def test_too_many_lines(): msg_body = """Test reply Hi -----Original Message----- Test""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_somebody_wrote(): msg_body = """Test reply On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote: > > Test > > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_polymail(): msg_body = """Test reply On Tue, Apr 11, 2017 at 10:07 PM John Smith < mailto:John Smith <johnsmith@gmail.com> > wrote: Test quoted data """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_sent_from_samsung_smb_wrote(): msg_body = """Test reply Sent from Samsung MobileName <address@example.com> wrote: > > Test > > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_wrote_somebody(): eq_('Lorem', quotations.extract_from_plain( """Lorem Op 13-02-2014 3:18 schreef Julius Caesar <pantheon@rome.com>: Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse. """)) def test_pattern_on_date_somebody_wrote_date_with_slashes(): msg_body = """Test reply On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > > Test. 
> > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_date_time_email_splitter(): msg_body = """Test reply 2014-10-17 11:28 GMT+03:00 Postmaster < postmaster@sandboxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mailgun.org>: > First from site > """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_somebody_wrote_allows_space_in_front(): msg_body = """Thanks Thanmai On Mar 8, 2012 9:59 AM, "Example.com" < r+7f1b094ceb90e18cca93d53d3703feae@example.com> wrote: >** > Blah-blah-blah""" eq_("Thanks Thanmai", quotations.extract_from_plain(msg_body)) def test_pattern_on_date_somebody_sent(): msg_body = """Test reply On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> sent: > > Test > > Roman""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_appointment(): msg_body = """Response 10/19/2017 @ 9:30 am for physical therapy Bla 1517 4th Avenue Ste 300 London CA 19129, 555-421-6780 John Doe, FCLS Mailgun Inc 555-941-0697 From: from@example.com [mailto:from@example.com] Sent: Wednesday, October 18, 2017 2:05 PM To: John Doer - SIU <jd@example.com> Subject: RE: Claim # 5551188-1 Text""" expected = """Response 10/19/2017 @ 9:30 am for physical therapy Bla 1517 4th Avenue Ste 300 London CA 19129, 555-421-6780 John Doe, FCLS Mailgun Inc 555-941-0697""" eq_(expected, quotations.extract_from_plain(msg_body)) def test_line_starts_with_on(): msg_body = """Blah-blah-blah On blah-blah-blah""" eq_(msg_body, quotations.extract_from_plain(msg_body)) def test_reply_and_quotation_splitter_share_line(): msg_body = """reply On Wed, Apr 4, 2012 at 3:59 PM, bob@example.com wrote: > Hi""" eq_('reply', quotations.extract_from_plain(msg_body)) msg_body = """reply--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote: > Hi""" eq_('reply', quotations.extract_from_plain(msg_body)) msg_body = """reply bla-bla - bla--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote: > Hi""" reply = """reply bla-bla - bla""" eq_(reply, 
quotations.extract_from_plain(msg_body)) def _check_pattern_original_message(original_message_indicator): msg_body = u"""Test reply -----{}----- Test""" eq_('Test reply', quotations.extract_from_plain( msg_body.format(six.text_type(original_message_indicator)))) def test_english_original_message(): _check_pattern_original_message('Original Message') _check_pattern_original_message('Reply Message') def test_german_original_message(): _check_pattern_original_message(u'Ursprüngliche Nachricht') _check_pattern_original_message('Antwort Nachricht') def test_danish_original_message(): _check_pattern_original_message('Oprindelig meddelelse') def test_reply_after_quotations(): msg_body = """On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > > Test Test reply""" eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_android_wrote(): msg_body = """Test reply ---- John Smith wrote ---- > quoted > text """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_reply_wraps_quotations(): msg_body = """Test reply On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > > Test Regards, Roman""" reply = """Test reply Regards, Roman""" eq_(reply, quotations.extract_from_plain(msg_body)) def test_reply_wraps_nested_quotations(): msg_body = """Test reply On 04/19/2011 07:10 AM, Roman Tkachenko wrote: >Test test >On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > >> >> Test. >> >> Roman Regards, Roman""" reply = """Test reply Regards, Roman""" eq_(reply, quotations.extract_from_plain(msg_body)) def test_quotation_separator_takes_2_lines(): msg_body = """Test reply On Fri, May 6, 2011 at 6:03 PM, Roman Tkachenko from Hacker News <roman@definebox.com> wrote: > Test. 
> > Roman Regards, Roman""" reply = """Test reply Regards, Roman""" eq_(reply, quotations.extract_from_plain(msg_body)) def test_quotation_separator_takes_3_lines(): msg_body = """Test reply On Nov 30, 2011, at 12:47 PM, Somebody < 416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4@somebody.domain.com> wrote: Test message """ eq_("Test reply", quotations.extract_from_plain(msg_body)) def test_short_quotation(): msg_body = """Hi On 04/19/2011 07:10 AM, Roman Tkachenko wrote: > Hello""" eq_("Hi", quotations.extract_from_plain(msg_body)) def test_with_indent(): msg_body = """YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin. ------On 12/29/1987 17:32 PM, Julius Caesar wrote----- Brunch mumblecore pug Marfa tofu, irure taxidermy hoodie readymade pariatur. """ eq_("YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin.", quotations.extract_from_plain(msg_body)) def test_short_quotation_with_newline(): msg_body = """Btw blah blah... On Tue, Jan 27, 2015 at 12:42 PM -0800, "Company" <christine.XXX@XXX.com> wrote: Hi Mark, Blah blah?  Thanks,Christine  On Jan 27, 2015, at 11:55 AM, Mark XXX <mark@XXX.com> wrote: Lorem ipsum? Mark Sent from Acompli""" eq_("Btw blah blah...", quotations.extract_from_plain(msg_body)) def test_pattern_date_email_with_unicode(): msg_body = """Replying ok 2011/4/7 Nathan \xd0\xb8ova <support@example.com> > Cool beans, scro""" eq_("Replying ok", quotations.extract_from_plain(msg_body)) def test_english_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain("""Allo! Follow up MIME! From: somebody@example.com Sent: March-19-11 5:42 PM To: Somebody Subject: The manager has commented on your Loop Blah-blah-blah """)) def test_german_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain( """Allo! Follow up MIME! Von: somebody@example.com Gesendet: Dienstag, 25. 
November 2014 14:59 An: Somebody Betreff: The manager has commented on your Loop Blah-blah-blah """)) def test_french_multiline_from_block(): eq_('Lorem ipsum', quotations.extract_from_plain( u"""Lorem ipsum De : Brendan xxx [mailto:brendan.xxx@xxx.com] Envoyé : vendredi 23 janvier 2015 16:39 À : Camille XXX Objet : Follow Up Blah-blah-blah """)) def test_french_from_block(): eq_('Lorem ipsum', quotations.extract_from_plain( u"""Lorem ipsum Le 23 janv. 2015 à 22:03, Brendan xxx <brendan.xxx@xxx.com<mailto:brendan.xxx@xxx.com>> a écrit: Bonjour!""")) def test_polish_from_block(): eq_('Lorem ipsum', quotations.extract_from_plain( u"""Lorem ipsum W dniu 28 stycznia 2015 01:53 użytkownik Zoe xxx <zoe.xxx@xxx.com> napisał: Blah! """)) def test_danish_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain( """Allo! Follow up MIME! Fra: somebody@example.com Sendt: 19. march 2011 12:10 Til: Somebody Emne: The manager has commented on your Loop Blah-blah-blah """)) def test_swedish_from_block(): eq_('Allo! Follow up MIME!', quotations.extract_from_plain( u"""Allo! Follow up MIME! Från: Anno Sportel [mailto:anno.spoel@hsbcssad.com] Skickat: den 26 augusti 2015 14:45 Till: Isacson Leiff Ämne: RE: Week 36 Blah-blah-blah """)) def test_swedish_from_line(): eq_('Lorem', quotations.extract_from_plain( """Lorem Den 14 september, 2015 02:23:18, Valentino Rudy (valentino@rudy.be) skrev: Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse. """)) def test_norwegian_from_line(): eq_('Lorem', quotations.extract_from_plain( u"""Lorem På 14 september 2015 på 02:23:18, Valentino Rudy (valentino@rudy.be) skrev: Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse. 
""")) def test_dutch_from_block(): eq_('Gluten-free culpa lo-fi et nesciunt nostrud.', quotations.extract_from_plain( """Gluten-free culpa lo-fi et nesciunt nostrud. Op 17-feb.-2015, om 13:18 heeft Julius Caesar <pantheon@rome.com> het volgende geschreven: Small batch beard laboris tempor, non listicle hella Tumblr heirloom. """)) def test_vietnamese_from_block(): eq_('Hello', quotations.extract_from_plain( u"""Hello Vào 14:24 8 tháng 6, 2017, Hùng Nguyễn <hungnguyen@xxx.com> đã viết: > Xin chào """)) def test_quotation_marker_false_positive(): msg_body = """Visit us now for assistance... >>> >>> http://www.domain.com <<< Visit our site by clicking the link above""" eq_(msg_body, quotations.extract_from_plain(msg_body)) def test_link_closed_with_quotation_marker_on_new_line(): msg_body = '''8.45am-1pm From: somebody@example.com Date: Wed, 16 May 2012 00:15:02 -0600 <http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT > <bob@example.com <mailto:bob@example.com> > Requester: ''' eq_('8.45am-1pm', quotations.extract_from_plain(msg_body)) def test_link_breaks_quotation_markers_sequence(): msg_body = """Blah On Thursday, October 25, 2012 at 3:03 PM, life is short. on Bob wrote: > > Post a response by replying to this email > (http://example.com/c/YzOTYzMmE) > > life is short. 
(http://example.com/c/YzMmE) > """ eq_("Blah", quotations.extract_from_plain(msg_body)) msg_body = """Blah On Monday, 24 September, 2012 at 3:46 PM, bob wrote: > [Ticket #50] test from bob > > View ticket (http://example.com/action _nonce=3dd518) > """ eq_("Blah", quotations.extract_from_plain(msg_body)) def test_from_block_starts_with_date(): msg_body = """Blah Date: Wed, 16 May 2012 00:15:02 -0600 To: klizhentas@example.com """ eq_('Blah', quotations.extract_from_plain(msg_body)) def test_bold_from_block(): msg_body = """Hi *From:* bob@example.com [mailto: bob@example.com] *Sent:* Wednesday, June 27, 2012 3:05 PM *To:* travis@example.com *Subject:* Hello """ eq_("Hi", quotations.extract_from_plain(msg_body)) def test_weird_date_format_in_date_block(): msg_body = """Blah Date: Fri=2C 28 Sep 2012 10:55:48 +0000 From: tickets@example.com To: bob@example.com Subject: [Ticket #8] Test """ eq_('Blah', quotations.extract_from_plain(msg_body)) def test_dont_parse_quotations_for_forwarded_messages(): msg_body = """FYI ---------- Forwarded message ---------- From: bob@example.com Date: Tue, Sep 4, 2012 at 1:35 PM Subject: Two line subject To: rob@example.com Text""" eq_(msg_body, quotations.extract_from_plain(msg_body)) def test_forwarded_message_in_quotations(): msg_body = """Blah -----Original Message----- FYI ---------- Forwarded message ---------- From: bob@example.com Date: Tue, Sep 4, 2012 at 1:35 PM Subject: Two line subject To: rob@example.com """ eq_("Blah", quotations.extract_from_plain(msg_body)) def test_mark_message_lines(): lines = ['Hello', '', '_____________', 'From: foo@bar.com', 'Date: Wed, 16 May 2012 00:15:02 -0600', '', '> Hi', '', 'Signature'] eq_('tesssemet', quotations.mark_message_lines(lines)) lines = ['Just testing the email reply', '', 'Robert J Samson', 'Sent from my iPhone', '', 'On Nov 30, 2011, at 12:47 PM, Skapture <', ('416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4' '@skapture-staging.mailgun.org>'), 'wrote:', '', 'Tarmo Lehtpuu has posted the 
following message on'] eq_('tettessset', quotations.mark_message_lines(lines)) def test_process_marked_lines(): markers = 'tsemmtetm' lines = [str(i) for i in range(len(markers))] lines = [str(i) for i in range(len(markers))] eq_(lines, quotations.process_marked_lines(lines, markers)) markers = 'tmm' lines = ['1', '2', '3'] eq_(['1', '2', '3'], quotations.process_marked_lines(lines, markers)) markers = 'tst' lines = ['1', '2', '3'] eq_(['1'], quotations.process_marked_lines(lines, markers)) markers = 'tsmt' lines = ['1', '2', '3', '4'] eq_(['1', '4'], quotations.process_marked_lines(lines, markers)) markers = 'tstsmt' lines = ['1', '2', '3', '4', '5', '6'] eq_(['1'], quotations.process_marked_lines(lines, markers)) markers = 'tsmttem' lines = ['text', 'splitter', '>View (http://example.com', '/abc', ')', '', '> quote'] eq_(lines[:1], quotations.process_marked_lines(lines, markers)) markers = 'tmmmtm' lines = ['text', '>' '>', '>', '(http://example.com) > ', '> life is short. (http://example.com) ' ] eq_(lines[:1], quotations.process_marked_lines(lines, markers)) markers = 'tsmtmtm' lines = ['text', 'splitter', '>', '(http://example.com)', '>', 'inline reply', '>'] eq_(lines, quotations.process_marked_lines(lines, markers)) markers = 'tsmtm' lines = ['text', 'splitter', '>', 'inline reply with link http://example.com', '>'] eq_(lines, quotations.process_marked_lines(lines, markers)) markers = 'tsmtm' lines = ['text', 'splitter', '>', 'inline reply (http://example.com)', '>'] eq_(lines, quotations.process_marked_lines(lines, markers)) def test_preprocess(): msg = ('Hello\n' 'See <http://google.com\n' '> for more\n' 'information On Nov 30, 2011, at 12:47 PM, Somebody <\n' '416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n' '@example.com>' 'wrote:\n' '\n' '> Hi') prepared_msg = ('Hello\n' 'See @@http://google.com\n' '@@ for more\n' 'information\n' ' On Nov 30, 2011, at 12:47 PM, Somebody <\n' '416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n' '@example.com>' 'wrote:\n' '\n' '> 
Hi') eq_(prepared_msg, quotations.preprocess(msg, '\n')) msg = """ > <http://teemcl.mailgun.org/u/**aD1mZmZiNGU5ODQwMDNkZWZlMTExNm** > MxNjQ4Y2RmOTNlMCZyPXNlcmdleS5v**YnlraG92JTQwbWFpbGd1bmhxLmNvbS** > Z0PSUyQSZkPWUwY2U<http://example.org/u/aD1mZmZiNGU5ODQwMDNkZWZlMTExNmMxNjQ4Y> """ eq_(msg, quotations.preprocess(msg, '\n')) msg = ('Hello\n' 'How are you? On Nov 30, 2011, at 12:47 PM,\n ' 'Example <\n' '416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n' '@example.org>' 'wrote:\n' '\n' '> Hi') eq_(msg, quotations.preprocess(msg, '\n')) msg = ('Hello On Nov 30, smb wrote:\n' 'Hi\n' 'On Nov 29, smb wrote:\n' 'hi') prepared_msg = ('Hello\n' ' On Nov 30, smb wrote:\n' 'Hi\n' 'On Nov 29, smb wrote:\n' 'hi') eq_(prepared_msg, quotations.preprocess(msg, '\n')) def test_preprocess_postprocess_2_links(): msg_body = "<http://link1> <http://link2>" eq_(msg_body, quotations.extract_from_plain(msg_body)) def body_iterator(msg, decode=False): for subpart in msg.walk(): payload = subpart.get_payload(decode=decode) if isinstance(payload, six.text_type): yield payload else: yield payload.decode('utf8') def test_standard_replies(): for filename in os.listdir(STANDARD_REPLIES): filename = os.path.join(STANDARD_REPLIES, filename) if not filename.endswith('.eml') or os.path.isdir(filename): continue with open(filename) as f: message = email.message_from_file(f) body = next(email.iterators.typed_subpart_iterator(message, subtype='plain')) text = ''.join(body_iterator(body, True)) stripped_text = quotations.extract_from_plain(text) reply_text_fn = filename[:-4] + '_reply_text' if os.path.isfile(reply_text_fn): with open(reply_text_fn) as f: reply_text = f.read().strip() else: reply_text = 'Hello' yield eq_, reply_text, stripped_text, \ "'%(reply)s' != %(stripped)s for %(fn)s" % \ {'reply': reply_text, 'stripped': stripped_text, 'fn': filename} def test_split_email(): msg = """From: Mr. X Date: 24 February 2016 To: Mr. Y Subject: Hi Attachments: none Goodbye. From: Mr. Y To: Mr. 
X Date: 24 February 2016 Subject: Hi Attachments: none Hello. On 24th February 2016 at 09.32am, Conal wrote: Hey! On Mon, 2016-10-03 at 09:45 -0600, Stangel, Dan wrote: > Mohan, > > We have not yet migrated the systems. > > Dan > > > -----Original Message----- > > Date: Mon, 2 Apr 2012 17:44:22 +0400 > > Subject: Test > > From: bob@xxx.mailgun.org > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com > > > > Hi > > > > > From: bob@xxx.mailgun.org > > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com > > > Date: Mon, 2 Apr 2012 17:44:22 +0400 > > > Subject: Test > > > Hi > > > > > > > """ expected_markers = "stttttsttttetesetesmmmmmmsmmmmmmmmmmmmmmmm" markers = quotations.split_emails(msg) eq_(markers, expected_markers) def test_feedback_below_left_unparsed(): msg_body = """Please enter your feedback below. Thank you. ------------------------------------- Enter Feedback Below ------------------------------------- The user experience was unparallelled. Please continue production. I'm sending payment to ensure that this line is intact.""" parsed = quotations.extract_from_plain(msg_body) eq_(msg_body, parsed.decode('utf8')) def test_appointment(): msg_body = """Invitation for an interview: Date: Wednesday 3, October 2011 Time: 7 : 00am Address: 130 Fox St Please bring in your ID.""" parsed = quotations.extract_from_plain(msg_body) eq_(msg_body, parsed.decode('utf8'))
true
true
1c354b93e7aa0731074c6f491615d8409cc656ae
3,151
py
Python
axelrod/tests/strategies/test_darwin.py
dashiellfryer/Axelrod
0d684b3273d15e3e0ecf70be8e893fffc5277c84
[ "MIT" ]
null
null
null
axelrod/tests/strategies/test_darwin.py
dashiellfryer/Axelrod
0d684b3273d15e3e0ecf70be8e893fffc5277c84
[ "MIT" ]
null
null
null
axelrod/tests/strategies/test_darwin.py
dashiellfryer/Axelrod
0d684b3273d15e3e0ecf70be8e893fffc5277c84
[ "MIT" ]
null
null
null
"""Tests for the Darwin PD strategy.""" import axelrod from .test_player import TestPlayer C, D = axelrod.Action.C, axelrod.Action.D class TestDarwin(TestPlayer): name = "Darwin" player = axelrod.Darwin expected_classifier = { "memory_depth": float("inf"), "stochastic": False, "makes_use_of": set(), "long_run_time": False, "inspects_source": True, "manipulates_source": False, "manipulates_state": True, } @classmethod def tearDownClass(cls): """After all tests have run, makes sure the Darwin genome is reset.""" cls.player.reset_genome() super(TestDarwin, cls).tearDownClass() def setUp(self): """Each test starts with a fresh genome.""" self.player.reset_genome() super(TestDarwin, self).setUp() def test_setup(self): player = self.player() self.assertEqual(player.genome, [C]) self.assertEqual(player.history, []) def test_foil_strategy_inspection(self): self.assertEqual(self.player().foil_strategy_inspection(), C) def test_strategy(self): p1 = self.player() p1.reset() self.versus_test( axelrod.Cooperator(), expected_actions=[(C, C)] * 5, attrs={"genome": [C] * 5}, ) expected_genome = [D] * 4 + [C] self.versus_test( axelrod.Defector(), expected_actions=[(C, D)] * 5, attrs={"genome": expected_genome}, ) # uses genome expected_actions = [(C, C)] + [(D, C)] * 3 + [(C, C)] * 2 self.versus_test(axelrod.Cooperator(), expected_actions) def test_against_geller_and_mindreader(self): self.versus_test( axelrod.GellerCooperator(), expected_actions=[(C, C)] * 2, attrs={"genome": [C, C]}, ) self.versus_test( axelrod.MindReader(), expected_actions=[(C, D)] * 2, attrs={"genome": [D, C]}, ) def test_reset_history_and_attributes(self): # Overwrite this method because Darwin does not reset self.versus_test(axelrod.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) p1 = self.player() self.assertEqual(p1.genome, [D, C, C, C, D]) p1.reset() self.assertEqual(len(p1.history), 0) self.assertEqual(p1.genome, [C, C, C, C, D]) def test_all_darwin_instances_share_one_genome(self): p1 = 
self.player() p2 = self.player() self.assertIs(p1.genome, p2.genome) self.versus_test(axelrod.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) self.assertEqual(p2.genome, [D, C, C, C, D]) self.assertIs(p1.genome, p2.genome) p3 = self.player() self.assertIs(p3.genome, p2.genome) def test_reset_genome(self): self.versus_test(axelrod.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) self.player.reset_genome() self.assertEqual(self.player().genome, [C]) def equality_of_players_test(self, p1, p2, seed, opponent): return True
29.726415
86
0.579499
import axelrod from .test_player import TestPlayer C, D = axelrod.Action.C, axelrod.Action.D class TestDarwin(TestPlayer): name = "Darwin" player = axelrod.Darwin expected_classifier = { "memory_depth": float("inf"), "stochastic": False, "makes_use_of": set(), "long_run_time": False, "inspects_source": True, "manipulates_source": False, "manipulates_state": True, } @classmethod def tearDownClass(cls): cls.player.reset_genome() super(TestDarwin, cls).tearDownClass() def setUp(self): self.player.reset_genome() super(TestDarwin, self).setUp() def test_setup(self): player = self.player() self.assertEqual(player.genome, [C]) self.assertEqual(player.history, []) def test_foil_strategy_inspection(self): self.assertEqual(self.player().foil_strategy_inspection(), C) def test_strategy(self): p1 = self.player() p1.reset() self.versus_test( axelrod.Cooperator(), expected_actions=[(C, C)] * 5, attrs={"genome": [C] * 5}, ) expected_genome = [D] * 4 + [C] self.versus_test( axelrod.Defector(), expected_actions=[(C, D)] * 5, attrs={"genome": expected_genome}, ) expected_actions = [(C, C)] + [(D, C)] * 3 + [(C, C)] * 2 self.versus_test(axelrod.Cooperator(), expected_actions) def test_against_geller_and_mindreader(self): self.versus_test( axelrod.GellerCooperator(), expected_actions=[(C, C)] * 2, attrs={"genome": [C, C]}, ) self.versus_test( axelrod.MindReader(), expected_actions=[(C, D)] * 2, attrs={"genome": [D, C]}, ) def test_reset_history_and_attributes(self): self.versus_test(axelrod.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) p1 = self.player() self.assertEqual(p1.genome, [D, C, C, C, D]) p1.reset() self.assertEqual(len(p1.history), 0) self.assertEqual(p1.genome, [C, C, C, C, D]) def test_all_darwin_instances_share_one_genome(self): p1 = self.player() p2 = self.player() self.assertIs(p1.genome, p2.genome) self.versus_test(axelrod.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) self.assertEqual(p2.genome, [D, C, C, C, D]) self.assertIs(p1.genome, p2.genome) p3 = 
self.player() self.assertIs(p3.genome, p2.genome) def test_reset_genome(self): self.versus_test(axelrod.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) self.player.reset_genome() self.assertEqual(self.player().genome, [C]) def equality_of_players_test(self, p1, p2, seed, opponent): return True
true
true
1c354d8943d8ae1c961e81b29a5162cdf06ad82e
12,600
py
Python
vega/trainer/callbacks/timm_trainer_callback.py
jie311/vega
1bba6100ead802697e691403b951e6652a99ccae
[ "MIT" ]
null
null
null
vega/trainer/callbacks/timm_trainer_callback.py
jie311/vega
1bba6100ead802697e691403b951e6652a99ccae
[ "MIT" ]
null
null
null
vega/trainer/callbacks/timm_trainer_callback.py
jie311/vega
1bba6100ead802697e691403b951e6652a99ccae
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. """TIMM method trainer.""" import os import importlib import torch from timm import create_model from timm.optim.optim_factory import create_optimizer, add_weight_decay from timm.scheduler import create_scheduler from timm.data import Dataset, create_transform from timm.utils import ModelEma from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.data.loader import fast_collate, PrefetchLoader from timm.data.distributed_sampler import OrderedDistributedSampler try: import apex from apex import amp except Exception: pass import horovod.torch as hvd import vega from vega.common import Config from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.trainer.callbacks import Callback def create_loader( dataset, input_size, batch_size, is_training=False, use_prefetcher=True, rand_erase_prob=0., rand_erase_mode='const', rand_erase_count=1, color_jitter=0.4, auto_augment=None, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_workers=1, distributed=False, crop_pct=None, collate_fn=None, fp16=False, tf_preprocessing=False, world_size=None, rank=None ): """Create data loader for timm.""" dataset.transform = create_transform( input_size, is_training=is_training, use_prefetcher=use_prefetcher, color_jitter=color_jitter, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, tf_preprocessing=tf_preprocessing, ) sampler = None if distributed: if is_training: sampler = torch.utils.data.distributed.DistributedSampler( 
dataset, num_replicas=world_size, rank=rank) else: # This will add extra duplicate entries to result in equal num # of samples per-process, will slightly alter validation results sampler = OrderedDistributedSampler(dataset, num_replicas=world_size, rank=rank) if collate_fn is None: collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate loader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=sampler is None and is_training, num_workers=num_workers, sampler=sampler, collate_fn=collate_fn, drop_last=is_training, ) if use_prefetcher: loader = PrefetchLoader( loader, re_prob=rand_erase_prob if is_training else 0., re_mode=rand_erase_mode, re_count=rand_erase_count, mean=mean, std=std, fp16=fp16) return loader @ClassFactory.register(ClassType.CALLBACK) class TimmTrainerCallback(Callback): """A special callback for TimmTrainer.""" disable_callbacks = ["LearningRateScheduler", "ModelStatistics", "ModelBuilder"] def before_train(self, logs=None): """Be called before the training process.""" self._init_all_settings() def before_epoch(self, epoch, logs=None): """Be called before each epoch.""" if self.distributed: self.trainer.train_loader.sampler.set_epoch(epoch) self.num_updates = epoch * len(self.trainer.train_loader) self.epoch = epoch self.trainer.model.train() def make_batch(self, batch): """Prepare batch data for train_step.""" input, target = batch if not self.config.prefetcher: if vega.is_gpu_device(): input, target = input.cuda(), target.cuda() elif vega.is_npu_device(): input, target = input.npu(), target.npu() return input, target def train_step(self, batch): """Train one step of model.""" input, target = batch self.trainer.optimizer.zero_grad() logits = self.trainer.model(input) loss = self.trainer.loss(logits, target) if self.use_amp: with amp.scale_loss(loss, self.trainer.optimizer) as scaled_loss: scaled_loss.backward() self.trainer.optimizer.synchronize() with 
self.trainer.optimizer.skip_synchronize(): self.trainer.optimizer.step() else: loss.backward() self.trainer.optimizer.step() if self.use_ema: self.model_ema.update(self.trainer.model) self.num_updates += 1 self.trainer.lr_scheduler.step_update(num_updates=self.num_updates) return {'loss': loss.item(), 'train_batch_output': logits, 'lr': self.trainer.lr_scheduler.get_epoch_values(self.epoch)} def before_valid(self, epoch, logs=None): """Be called before valid loop.""" if self.use_ema: self.trainer.model = self.model_ema.ema self.trainer.model.eval() def after_epoch(self, epoch, logs=None): """Be called after each epoch.""" if self.use_ema: self.trainer.model = self.model self.trainer.lr_scheduler.step(epoch=epoch + 1) if self.trainer.is_chief: self.trainer._backup() def _init_all_settings(self): # noqa: C901 """Init all settings from config.""" self.config = self.trainer.config if self.trainer.hps and self.trainer.hps.get('trainer'): self.config.from_dict(self.trainer.hps.get('trainer')) self.trainer._init_distributed_setting() if not vega.is_cpu_device(): self.trainer._init_setting() self.epochs = self.trainer.epochs self.distributed = self.trainer.distributed self.trainer.model = self._init_model() self.model = self.trainer.model self.use_syncbn = self.config.syncbn self.trainer.use_syncbn = self.use_syncbn if self.use_syncbn: self.trainer.model = apex.parallel.convert_syncbn_model(self.trainer.model) self.trainer.optimizer = self._init_optimizer() self.use_ema = hasattr(self.config, 'model_ema') if self.use_ema: self.model_ema = self._init_model_ema() self.trainer.lr_scheduler = self._init_lr_scheduler() self.trainer.loss = self._init_loss() if self.distributed: self.trainer._init_horovod_setting() self.use_amp = self.config.amp if self.use_amp: self.trainer.model, self.trainer.optimizer = amp.initialize(self.trainer.model, self.trainer.optimizer, opt_level='O1') self._init_dataloader() self.trainer.valid_metrics = self.trainer._init_metrics(None) 
self.trainer.callbacks._set_params(self.trainer) # self.trainer.has_built = True def _init_model_ema(self): """Init Model Ema.""" args = self.config.model_ema model_ema = ModelEma(self.trainer.model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume=None) return model_ema def _init_model(self): """Init network model from timm according to model type in config.""" args = self.config.model_desc model = create_model(args.model_name, pretrained=args.pretrained, num_classes=args.num_classes, drop_rate=args.drop, drop_path_rate=args.drop_path, global_pool=args.gp, bn_tf=args.bn_tf, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps, checkpoint_path=args.initial_checkpoint) if vega.is_gpu_device(): model = model.cuda() elif vega.is_npu_device(): model = model.npu() return model def _init_optimizer(self): """Init optimizer from timm according to optim type in config.""" optimizer = create_optimizer(self.config.optimizer().to_dict()["params"], self.trainer.model) if self.distributed: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=self.trainer.model.named_parameters(), compression=hvd.Compression.none) return optimizer def _init_lr_scheduler(self): """Init lr scheduler from timm according to type in config.""" args = self.config.lr_scheduler().to_dict()["params"] args['epochs'] = self.config.epochs lr_scheduler, self.config.epochs = create_scheduler(Config(args), self.trainer.optimizer) start_epoch = args.get('start_epoch', 0) lr_scheduler.step(start_epoch) return lr_scheduler def _init_loss(self): """Init loss function from timm according to type in config.""" loss_name = self.config.loss.type loss_config = self.config.loss().to_dict()["params"] loss_class = getattr(importlib.import_module('timm.loss'), loss_name) loss_fn = loss_class(**loss_config) if vega.is_gpu_device(): loss_fn = loss_fn.cuda() elif vega.is_npu_device(): loss_fn = loss_fn.npu() return loss_fn def _reset_sync_opt(self): """Rest sysnc opt.""" params = 
add_weight_decay(self.model, self.config.optimizer.weight_decay) self.optimizer.param_groups = [] param_groups = list(params) if not isinstance(param_groups[0], dict): param_groups = [{'params': param_groups}] for param_group in param_groups: self.optimizer.add_param_group(param_group) def _init_dataloader(self): """Init dataloader from timm.""" if self.distributed and hvd.local_rank() == 0 and 'remote_data_dir' in self.config.dataset: FileOps.copy_folder(self.config.dataset.remote_data_dir, self.config.dataset.data_dir) if self.distributed: hvd.join() args = self.config.dataset train_dir = os.path.join(self.config.dataset.data_dir, 'train') dataset_train = Dataset(train_dir) world_size, rank = None, None if self.distributed: world_size, rank = hvd.size(), hvd.rank() self.trainer.train_loader = create_loader( dataset_train, input_size=tuple(args.input_size), batch_size=args.batch_size, is_training=True, use_prefetcher=self.config.prefetcher, rand_erase_prob=args.reprob, rand_erase_mode=args.remode, rand_erase_count=args.recount, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation='random', mean=tuple(args.mean), std=tuple(args.std), num_workers=args.workers, distributed=self.distributed, world_size=world_size, rank=rank ) valid_dir = os.path.join(self.config.dataset.data_dir, 'val') dataset_eval = Dataset(valid_dir) self.trainer.valid_loader = create_loader( dataset_eval, input_size=tuple(args.input_size), batch_size=4 * args.batch_size, is_training=False, use_prefetcher=self.config.prefetcher, interpolation=args.interpolation, mean=tuple(args.mean), std=tuple(args.std), num_workers=args.workers, distributed=self.distributed, world_size=world_size, rank=rank ) self.trainer.batch_num_train = len(self.trainer.train_loader) self.trainer.batch_num_valid = len(self.trainer.valid_loader)
38.650307
104
0.620794
import os import importlib import torch from timm import create_model from timm.optim.optim_factory import create_optimizer, add_weight_decay from timm.scheduler import create_scheduler from timm.data import Dataset, create_transform from timm.utils import ModelEma from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.data.loader import fast_collate, PrefetchLoader from timm.data.distributed_sampler import OrderedDistributedSampler try: import apex from apex import amp except Exception: pass import horovod.torch as hvd import vega from vega.common import Config from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.trainer.callbacks import Callback def create_loader( dataset, input_size, batch_size, is_training=False, use_prefetcher=True, rand_erase_prob=0., rand_erase_mode='const', rand_erase_count=1, color_jitter=0.4, auto_augment=None, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_workers=1, distributed=False, crop_pct=None, collate_fn=None, fp16=False, tf_preprocessing=False, world_size=None, rank=None ): dataset.transform = create_transform( input_size, is_training=is_training, use_prefetcher=use_prefetcher, color_jitter=color_jitter, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, tf_preprocessing=tf_preprocessing, ) sampler = None if distributed: if is_training: sampler = torch.utils.data.distributed.DistributedSampler( dataset, num_replicas=world_size, rank=rank) else: sampler = OrderedDistributedSampler(dataset, num_replicas=world_size, rank=rank) if collate_fn is None: collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate loader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=sampler is None and is_training, num_workers=num_workers, sampler=sampler, collate_fn=collate_fn, drop_last=is_training, ) if use_prefetcher: loader = PrefetchLoader( 
loader, re_prob=rand_erase_prob if is_training else 0., re_mode=rand_erase_mode, re_count=rand_erase_count, mean=mean, std=std, fp16=fp16) return loader @ClassFactory.register(ClassType.CALLBACK) class TimmTrainerCallback(Callback): disable_callbacks = ["LearningRateScheduler", "ModelStatistics", "ModelBuilder"] def before_train(self, logs=None): self._init_all_settings() def before_epoch(self, epoch, logs=None): if self.distributed: self.trainer.train_loader.sampler.set_epoch(epoch) self.num_updates = epoch * len(self.trainer.train_loader) self.epoch = epoch self.trainer.model.train() def make_batch(self, batch): input, target = batch if not self.config.prefetcher: if vega.is_gpu_device(): input, target = input.cuda(), target.cuda() elif vega.is_npu_device(): input, target = input.npu(), target.npu() return input, target def train_step(self, batch): input, target = batch self.trainer.optimizer.zero_grad() logits = self.trainer.model(input) loss = self.trainer.loss(logits, target) if self.use_amp: with amp.scale_loss(loss, self.trainer.optimizer) as scaled_loss: scaled_loss.backward() self.trainer.optimizer.synchronize() with self.trainer.optimizer.skip_synchronize(): self.trainer.optimizer.step() else: loss.backward() self.trainer.optimizer.step() if self.use_ema: self.model_ema.update(self.trainer.model) self.num_updates += 1 self.trainer.lr_scheduler.step_update(num_updates=self.num_updates) return {'loss': loss.item(), 'train_batch_output': logits, 'lr': self.trainer.lr_scheduler.get_epoch_values(self.epoch)} def before_valid(self, epoch, logs=None): if self.use_ema: self.trainer.model = self.model_ema.ema self.trainer.model.eval() def after_epoch(self, epoch, logs=None): if self.use_ema: self.trainer.model = self.model self.trainer.lr_scheduler.step(epoch=epoch + 1) if self.trainer.is_chief: self.trainer._backup() def _init_all_settings(self): self.config = self.trainer.config if self.trainer.hps and self.trainer.hps.get('trainer'): 
self.config.from_dict(self.trainer.hps.get('trainer')) self.trainer._init_distributed_setting() if not vega.is_cpu_device(): self.trainer._init_setting() self.epochs = self.trainer.epochs self.distributed = self.trainer.distributed self.trainer.model = self._init_model() self.model = self.trainer.model self.use_syncbn = self.config.syncbn self.trainer.use_syncbn = self.use_syncbn if self.use_syncbn: self.trainer.model = apex.parallel.convert_syncbn_model(self.trainer.model) self.trainer.optimizer = self._init_optimizer() self.use_ema = hasattr(self.config, 'model_ema') if self.use_ema: self.model_ema = self._init_model_ema() self.trainer.lr_scheduler = self._init_lr_scheduler() self.trainer.loss = self._init_loss() if self.distributed: self.trainer._init_horovod_setting() self.use_amp = self.config.amp if self.use_amp: self.trainer.model, self.trainer.optimizer = amp.initialize(self.trainer.model, self.trainer.optimizer, opt_level='O1') self._init_dataloader() self.trainer.valid_metrics = self.trainer._init_metrics(None) self.trainer.callbacks._set_params(self.trainer) def _init_model_ema(self): args = self.config.model_ema model_ema = ModelEma(self.trainer.model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume=None) return model_ema def _init_model(self): args = self.config.model_desc model = create_model(args.model_name, pretrained=args.pretrained, num_classes=args.num_classes, drop_rate=args.drop, drop_path_rate=args.drop_path, global_pool=args.gp, bn_tf=args.bn_tf, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps, checkpoint_path=args.initial_checkpoint) if vega.is_gpu_device(): model = model.cuda() elif vega.is_npu_device(): model = model.npu() return model def _init_optimizer(self): optimizer = create_optimizer(self.config.optimizer().to_dict()["params"], self.trainer.model) if self.distributed: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=self.trainer.model.named_parameters(), 
compression=hvd.Compression.none) return optimizer def _init_lr_scheduler(self): args = self.config.lr_scheduler().to_dict()["params"] args['epochs'] = self.config.epochs lr_scheduler, self.config.epochs = create_scheduler(Config(args), self.trainer.optimizer) start_epoch = args.get('start_epoch', 0) lr_scheduler.step(start_epoch) return lr_scheduler def _init_loss(self): loss_name = self.config.loss.type loss_config = self.config.loss().to_dict()["params"] loss_class = getattr(importlib.import_module('timm.loss'), loss_name) loss_fn = loss_class(**loss_config) if vega.is_gpu_device(): loss_fn = loss_fn.cuda() elif vega.is_npu_device(): loss_fn = loss_fn.npu() return loss_fn def _reset_sync_opt(self): params = add_weight_decay(self.model, self.config.optimizer.weight_decay) self.optimizer.param_groups = [] param_groups = list(params) if not isinstance(param_groups[0], dict): param_groups = [{'params': param_groups}] for param_group in param_groups: self.optimizer.add_param_group(param_group) def _init_dataloader(self): if self.distributed and hvd.local_rank() == 0 and 'remote_data_dir' in self.config.dataset: FileOps.copy_folder(self.config.dataset.remote_data_dir, self.config.dataset.data_dir) if self.distributed: hvd.join() args = self.config.dataset train_dir = os.path.join(self.config.dataset.data_dir, 'train') dataset_train = Dataset(train_dir) world_size, rank = None, None if self.distributed: world_size, rank = hvd.size(), hvd.rank() self.trainer.train_loader = create_loader( dataset_train, input_size=tuple(args.input_size), batch_size=args.batch_size, is_training=True, use_prefetcher=self.config.prefetcher, rand_erase_prob=args.reprob, rand_erase_mode=args.remode, rand_erase_count=args.recount, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation='random', mean=tuple(args.mean), std=tuple(args.std), num_workers=args.workers, distributed=self.distributed, world_size=world_size, rank=rank ) valid_dir = os.path.join(self.config.dataset.data_dir, 
'val') dataset_eval = Dataset(valid_dir) self.trainer.valid_loader = create_loader( dataset_eval, input_size=tuple(args.input_size), batch_size=4 * args.batch_size, is_training=False, use_prefetcher=self.config.prefetcher, interpolation=args.interpolation, mean=tuple(args.mean), std=tuple(args.std), num_workers=args.workers, distributed=self.distributed, world_size=world_size, rank=rank ) self.trainer.batch_num_train = len(self.trainer.train_loader) self.trainer.batch_num_valid = len(self.trainer.valid_loader)
true
true
1c354deb3b550e52959d47cde1cd5797407f7e48
394
py
Python
.idea/para.py
alinaskukina/sql
2600f29049a8e1613114db99f99f102677806ec8
[ "MIT" ]
null
null
null
.idea/para.py
alinaskukina/sql
2600f29049a8e1613114db99f99f102677806ec8
[ "MIT" ]
null
null
null
.idea/para.py
alinaskukina/sql
2600f29049a8e1613114db99f99f102677806ec8
[ "MIT" ]
null
null
null
cursor.execute('create index salary_index on works (salary)') con.commit() # количество всех записей ursor.execute('SELECT COUNT(*) FROM works') print(cursor.fetchall()[0][0]) # men cursor.execute('SELECT COUNT(*) FROM works where gender = "Мужской"') print(cursor.fetchall()[0][0]) # women cursor.execute('SELECT COUNT(*) FROM works where gender = "Женский"') print(cursor.fetchall()[0][0])
28.142857
69
0.720812
cursor.execute('create index salary_index on works (salary)') con.commit() ursor.execute('SELECT COUNT(*) FROM works') print(cursor.fetchall()[0][0]) cursor.execute('SELECT COUNT(*) FROM works where gender = "Мужской"') print(cursor.fetchall()[0][0]) cursor.execute('SELECT COUNT(*) FROM works where gender = "Женский"') print(cursor.fetchall()[0][0])
true
true
1c354f7b9aa064540b311d77ba59af35f5ca76da
4,678
py
Python
machine-learning-box/sentiment-analysis/predict_chainer.py
akito19/treasure-boxes
a70f69944ba40a2561c76fee3f27d47d9b8c68a7
[ "MIT" ]
null
null
null
machine-learning-box/sentiment-analysis/predict_chainer.py
akito19/treasure-boxes
a70f69944ba40a2561c76fee3f27d47d9b8c68a7
[ "MIT" ]
null
null
null
machine-learning-box/sentiment-analysis/predict_chainer.py
akito19/treasure-boxes
a70f69944ba40a2561c76fee3f27d47d9b8c68a7
[ "MIT" ]
null
null
null
import json import os import sys import tarfile from logging import DEBUG, StreamHandler, getLogger import numpy os.system(f"{sys.executable} -m pip install -U pytd==0.8.0 td-client") import pytd.pandas_td as td os.system(f"{sys.executable} -m pip install -U chainer") import chainer from chainer_utils import nets, nlp_utils MODEL_URL = "https://workflow-example-public.s3.amazonaws.com/imdb_model.tar.gz" logger = getLogger(__name__) handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False def setup_model(device, model_setup): setup = json.load(open(model_setup)) logger.info(json.dumps(setup, indent=2) + "\n") vocab = json.load(open(setup["vocab_path"])) n_class = setup["n_class"] # Setup a model if setup["model"] == "rnn": Encoder = nets.RNNEncoder elif setup["model"] == "cnn": Encoder = nets.CNNEncoder elif setup["model"] == "bow": Encoder = nets.BOWMLPEncoder encoder = Encoder( n_layers=setup["layer"], n_vocab=len(vocab), n_units=setup["unit"], dropout=setup["dropout"], ) model = nets.TextClassifier(encoder, n_class) chainer.serializers.load_npz(setup["model_path"], model) model.to_device(device) # Copy the model to the device return model, vocab, setup def run_batch( database, input_table, output_table, device, model, vocab, setup, batchsize=64 ): def predict_batch(words_batch): xs = nlp_utils.transform_to_array(words_batch, vocab, with_label=False) xs = nlp_utils.convert_seq(xs, device=device, with_label=False) with chainer.using_config("train", False), chainer.no_backprop_mode(): probs = model.predict(xs, softmax=True) # Note: Prediction labels are different from original Chainer example # positive: 1, negative: 0 answers = model.xp.argmax(probs, axis=1) scores = probs[model.xp.arange(answers.size), answers].tolist() return answers, scores td_api_key = os.environ["TD_API_KEY"] endpoint = os.environ["TD_API_SERVER"] logger.info("Connect to Treasure Data") con = td.connect() presto = 
td.create_engine(f"presto:{database}", con=con) logger.info("Fetch data from Treasure Data") test_df = td.read_td( f""" select rowid, sentence, sentiment, polarity from {input_table} """, presto, ) sentences = test_df["sentence"].tolist() logger.info("Start prediction") batch = [] predicted = [] i = 1 for sentence in sentences: text = nlp_utils.normalize_text(sentence) words = nlp_utils.split_text(text, char_based=setup["char_based"]) batch.append(words) if len(batch) >= batchsize: _predicted, _ = predict_batch(batch) predicted.append(_predicted) batch = [] logger.info(f"Predicted: {i}th batch. batch size {batchsize}") i += 1 if batch: _predicted, _ = predict_batch(batch) predicted.append(_predicted) logger.info("Finish prediction") test_df["predicted_polarity"] = numpy.concatenate(predicted, axis=None) # Note: Train test split strategy is different from pre trained model and # these tables so that the model includes test data since the model # is trained by Chainer official example. # This accuracy is just for a demo. 
# # accuracy = (test_df.polarity == test_df.predicted_polarity).value_counts()[ # 1 # ] / len(test_df) # print(f"Test set accuracy: {accuracy}") con2 = td.connect(apikey=td_api_key, endpoint=endpoint) td.to_td( test_df[["rowid", "predicted_polarity"]], f"{database}.{output_table}", con=con2, if_exists="replace", index=False, ) logger.info("Upload completed") def download_model(): path = chainer.dataset.cached_download(MODEL_URL) tf = tarfile.open(path, "r") tf.extractall() return os.path.join("result", "args.json") def predict_chainer(database, input_table, output_table, device_num=-1): device = chainer.get_device(device_num) device.use() model_setup = download_model() logger.info(f"model setup path: {model_setup}") model, vocab, setup = setup_model(device, model_setup) run_batch( database, input_table, output_table, device, model, vocab, setup, batchsize=64 ) if __name__ == "__main__": predict_chainer( "sentiment", "movie_review_test_shuffled", "test_predicted_polarities_chainer" )
28.876543
86
0.655622
import json import os import sys import tarfile from logging import DEBUG, StreamHandler, getLogger import numpy os.system(f"{sys.executable} -m pip install -U pytd==0.8.0 td-client") import pytd.pandas_td as td os.system(f"{sys.executable} -m pip install -U chainer") import chainer from chainer_utils import nets, nlp_utils MODEL_URL = "https://workflow-example-public.s3.amazonaws.com/imdb_model.tar.gz" logger = getLogger(__name__) handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False def setup_model(device, model_setup): setup = json.load(open(model_setup)) logger.info(json.dumps(setup, indent=2) + "\n") vocab = json.load(open(setup["vocab_path"])) n_class = setup["n_class"] if setup["model"] == "rnn": Encoder = nets.RNNEncoder elif setup["model"] == "cnn": Encoder = nets.CNNEncoder elif setup["model"] == "bow": Encoder = nets.BOWMLPEncoder encoder = Encoder( n_layers=setup["layer"], n_vocab=len(vocab), n_units=setup["unit"], dropout=setup["dropout"], ) model = nets.TextClassifier(encoder, n_class) chainer.serializers.load_npz(setup["model_path"], model) model.to_device(device) return model, vocab, setup def run_batch( database, input_table, output_table, device, model, vocab, setup, batchsize=64 ): def predict_batch(words_batch): xs = nlp_utils.transform_to_array(words_batch, vocab, with_label=False) xs = nlp_utils.convert_seq(xs, device=device, with_label=False) with chainer.using_config("train", False), chainer.no_backprop_mode(): probs = model.predict(xs, softmax=True) answers = model.xp.argmax(probs, axis=1) scores = probs[model.xp.arange(answers.size), answers].tolist() return answers, scores td_api_key = os.environ["TD_API_KEY"] endpoint = os.environ["TD_API_SERVER"] logger.info("Connect to Treasure Data") con = td.connect() presto = td.create_engine(f"presto:{database}", con=con) logger.info("Fetch data from Treasure Data") test_df = td.read_td( f""" select rowid, sentence, sentiment, 
polarity from {input_table} """, presto, ) sentences = test_df["sentence"].tolist() logger.info("Start prediction") batch = [] predicted = [] i = 1 for sentence in sentences: text = nlp_utils.normalize_text(sentence) words = nlp_utils.split_text(text, char_based=setup["char_based"]) batch.append(words) if len(batch) >= batchsize: _predicted, _ = predict_batch(batch) predicted.append(_predicted) batch = [] logger.info(f"Predicted: {i}th batch. batch size {batchsize}") i += 1 if batch: _predicted, _ = predict_batch(batch) predicted.append(_predicted) logger.info("Finish prediction") test_df["predicted_polarity"] = numpy.concatenate(predicted, axis=None) con2 = td.connect(apikey=td_api_key, endpoint=endpoint) td.to_td( test_df[["rowid", "predicted_polarity"]], f"{database}.{output_table}", con=con2, if_exists="replace", index=False, ) logger.info("Upload completed") def download_model(): path = chainer.dataset.cached_download(MODEL_URL) tf = tarfile.open(path, "r") tf.extractall() return os.path.join("result", "args.json") def predict_chainer(database, input_table, output_table, device_num=-1): device = chainer.get_device(device_num) device.use() model_setup = download_model() logger.info(f"model setup path: {model_setup}") model, vocab, setup = setup_model(device, model_setup) run_batch( database, input_table, output_table, device, model, vocab, setup, batchsize=64 ) if __name__ == "__main__": predict_chainer( "sentiment", "movie_review_test_shuffled", "test_predicted_polarities_chainer" )
true
true
1c354fd7ffeee53ea700310aab7c29b591e64050
6,100
py
Python
docs/source/conf.py
SimulatedANeal/carpedm
22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89
[ "MIT" ]
2
2020-09-30T04:59:06.000Z
2021-03-30T20:42:44.000Z
docs/source/conf.py
SimulatedANeal/carpedm
22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89
[ "MIT" ]
null
null
null
docs/source/conf.py
SimulatedANeal/carpedm
22bd5d28cfff50d7462e2a8e1b8dc1675e2a4c89
[ "MIT" ]
1
2018-05-25T07:15:16.000Z
2018-05-25T07:15:16.000Z
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys from unittest.mock import MagicMock sys.path.insert(0, os.path.abspath('./../../')) # -- Mock modules fix for those depending on C Library ----------------------- class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() MOCK_MODULES = [ 'numpy', 'tensorflow', 'tensorflow.python.platform', 'tensorflow.python.framework', 'tensorflow.contrib.training', 'tensorflow.python.training', 'tensorflow.core.framework', ] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # -- Project information ----------------------------------------------------- from carpedm import __version__ project = 'carpedm' copyright = '2018, Neal Digre' author = 'Neal Digre' # The short X.Y version version = ".".join(__version__.split('.')[:2]) # The full version, including alpha/beta/rc tags release = __version__ # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # html_style = 'css/mystle.css' # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. 
Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'carpedmdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'carpedm.tex', 'carpedm Documentation', 'Neal Digre', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'carpedm', 'carpedm Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'carpedm', 'carpedm Documentation', author, 'carpedm', 'Data Manager for machine learning.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = {'https://docs.python.org/': None, 'numpy': ('http://docs.scipy.org/doc/numpy/', None),} # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True
30.348259
79
0.643607
import os import sys from unittest.mock import MagicMock sys.path.insert(0, os.path.abspath('./../../')) class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() MOCK_MODULES = [ 'numpy', 'tensorflow', 'tensorflow.python.platform', 'tensorflow.python.framework', 'tensorflow.contrib.training', 'tensorflow.python.training', 'tensorflow.core.framework', ] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) from carpedm import __version__ project = 'carpedm' copyright = '2018, Neal Digre' author = 'Neal Digre' version = ".".join(__version__.split('.')[:2]) release = __version__ extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', ] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' language = None exclude_patterns = [] pygments_style = 'sphinx' html_theme = 'sphinx_rtd_theme' html_static_path = ['_static'] # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'carpedmdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, 'carpedm.tex', 'carpedm Documentation', 'Neal Digre', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'carpedm', 'carpedm Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'carpedm', 'carpedm Documentation', author, 'carpedm', 'Data Manager for machine learning.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None, 'numpy': ('http://docs.scipy.org/doc/numpy/', None),} # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True
true
true
1c355016f0f9b2d0c9b718446060b9de55e60d0c
5,385
py
Python
compiler/testdata/expected/python.asyncio/variety/f_Events_publisher.py
chrisgustavsen-wf/frugal
1140f5ff6a37b248e387d09a7779c7b25a4a92ee
[ "Apache-2.0" ]
null
null
null
compiler/testdata/expected/python.asyncio/variety/f_Events_publisher.py
chrisgustavsen-wf/frugal
1140f5ff6a37b248e387d09a7779c7b25a4a92ee
[ "Apache-2.0" ]
null
null
null
compiler/testdata/expected/python.asyncio/variety/f_Events_publisher.py
chrisgustavsen-wf/frugal
1140f5ff6a37b248e387d09a7779c7b25a4a92ee
[ "Apache-2.0" ]
null
null
null
# # Autogenerated by Frugal Compiler (3.14.1) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # import inspect import sys import traceback from thrift.Thrift import TApplicationException from thrift.Thrift import TMessageType from thrift.Thrift import TType from frugal.exceptions import TApplicationExceptionType from frugal.middleware import Method from frugal.subscription import FSubscription from frugal.transport import TMemoryOutputBuffer from .ttypes import * class EventsPublisher(object): """ This docstring gets added to the generated code because it has the @ sign. Prefix specifies topic prefix tokens, which can be static or variable. """ _DELIMITER = '.' def __init__(self, provider, middleware=None): """ Create a new EventsPublisher. Args: provider: FScopeProvider middleware: ServiceMiddleware or list of ServiceMiddleware """ middleware = middleware or [] if middleware and not isinstance(middleware, list): middleware = [middleware] middleware += provider.get_middleware() self._transport, self._protocol_factory = provider.new_publisher() self._methods = { 'publish_EventCreated': Method(self._publish_EventCreated, middleware), 'publish_SomeInt': Method(self._publish_SomeInt, middleware), 'publish_SomeStr': Method(self._publish_SomeStr, middleware), 'publish_SomeList': Method(self._publish_SomeList, middleware), } async def open(self): await self._transport.open() async def close(self): await self._transport.close() async def publish_EventCreated(self, ctx, user, req): """ This is a docstring. 
Args: ctx: FContext user: string req: Event """ await self._methods['publish_EventCreated']([ctx, user, req]) async def _publish_EventCreated(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'EventCreated' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) req.write(oprot) oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue()) async def publish_SomeInt(self, ctx, user, req): """ Args: ctx: FContext user: string req: i64 """ await self._methods['publish_SomeInt']([ctx, user, req]) async def _publish_SomeInt(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'SomeInt' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) oprot.writeI64(req) oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue()) async def publish_SomeStr(self, ctx, user, req): """ Args: ctx: FContext user: string req: string """ await self._methods['publish_SomeStr']([ctx, user, req]) async def _publish_SomeStr(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'SomeStr' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) oprot.writeString(req) oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue()) async def publish_SomeList(self, ctx, user, 
req): """ Args: ctx: FContext user: string req: list """ await self._methods['publish_SomeList']([ctx, user, req]) async def _publish_SomeList(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'SomeList' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) oprot.writeListBegin(TType.MAP, len(req)) for elem70 in req: oprot.writeMapBegin(TType.I64, TType.STRUCT, len(elem70)) for elem72, elem71 in elem70.items(): oprot.writeI64(elem72) elem71.write(oprot) oprot.writeMapEnd() oprot.writeListEnd() oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue())
33.240741
83
0.636769
import inspect import sys import traceback from thrift.Thrift import TApplicationException from thrift.Thrift import TMessageType from thrift.Thrift import TType from frugal.exceptions import TApplicationExceptionType from frugal.middleware import Method from frugal.subscription import FSubscription from frugal.transport import TMemoryOutputBuffer from .ttypes import * class EventsPublisher(object): _DELIMITER = '.' def __init__(self, provider, middleware=None): middleware = middleware or [] if middleware and not isinstance(middleware, list): middleware = [middleware] middleware += provider.get_middleware() self._transport, self._protocol_factory = provider.new_publisher() self._methods = { 'publish_EventCreated': Method(self._publish_EventCreated, middleware), 'publish_SomeInt': Method(self._publish_SomeInt, middleware), 'publish_SomeStr': Method(self._publish_SomeStr, middleware), 'publish_SomeList': Method(self._publish_SomeList, middleware), } async def open(self): await self._transport.open() async def close(self): await self._transport.close() async def publish_EventCreated(self, ctx, user, req): await self._methods['publish_EventCreated']([ctx, user, req]) async def _publish_EventCreated(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'EventCreated' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) req.write(oprot) oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue()) async def publish_SomeInt(self, ctx, user, req): await self._methods['publish_SomeInt']([ctx, user, req]) async def _publish_SomeInt(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'SomeInt' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, 
op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) oprot.writeI64(req) oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue()) async def publish_SomeStr(self, ctx, user, req): await self._methods['publish_SomeStr']([ctx, user, req]) async def _publish_SomeStr(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'SomeStr' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) oprot.writeString(req) oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue()) async def publish_SomeList(self, ctx, user, req): await self._methods['publish_SomeList']([ctx, user, req]) async def _publish_SomeList(self, ctx, user, req): ctx.set_request_header('_topic_user', user) op = 'SomeList' prefix = 'foo.{}.'.format(user) topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op) buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit()) oprot = self._protocol_factory.get_protocol(buffer) oprot.write_request_headers(ctx) oprot.writeMessageBegin(op, TMessageType.CALL, 0) oprot.writeListBegin(TType.MAP, len(req)) for elem70 in req: oprot.writeMapBegin(TType.I64, TType.STRUCT, len(elem70)) for elem72, elem71 in elem70.items(): oprot.writeI64(elem72) elem71.write(oprot) oprot.writeMapEnd() oprot.writeListEnd() oprot.writeMessageEnd() await self._transport.publish(topic, buffer.getvalue())
true
true
1c35517f1fb087b0522b5f6ab5e6f6c74382dc96
613
py
Python
optimus/engines/dask/mask.py
niallscc/Optimus
35218401556e5acc4beb2859084128ebcd1ab4e5
[ "Apache-2.0" ]
null
null
null
optimus/engines/dask/mask.py
niallscc/Optimus
35218401556e5acc4beb2859084128ebcd1ab4e5
[ "Apache-2.0" ]
null
null
null
optimus/engines/dask/mask.py
niallscc/Optimus
35218401556e5acc4beb2859084128ebcd1ab4e5
[ "Apache-2.0" ]
null
null
null
from optimus.engines.base.commons.functions import is_string, is_integer, is_float, is_numeric from optimus.engines.base.mask import Mask class DaskMask(Mask): def str(self, col_name="*"): return self.root.cols.apply(col_name, is_string, mode="partitioned") def int(self, col_name="*"): return self.root.cols.apply(col_name, is_integer, mode="partitioned") def float(self, col_name="*"): return self.root.cols.apply(col_name, is_float, mode="partitioned") def numeric(self, col_name="*"): return self.root.cols.apply(col_name, is_numeric, mode="partitioned")
34.055556
94
0.706362
from optimus.engines.base.commons.functions import is_string, is_integer, is_float, is_numeric from optimus.engines.base.mask import Mask class DaskMask(Mask): def str(self, col_name="*"): return self.root.cols.apply(col_name, is_string, mode="partitioned") def int(self, col_name="*"): return self.root.cols.apply(col_name, is_integer, mode="partitioned") def float(self, col_name="*"): return self.root.cols.apply(col_name, is_float, mode="partitioned") def numeric(self, col_name="*"): return self.root.cols.apply(col_name, is_numeric, mode="partitioned")
true
true
1c35519920b3991834ea4893aa056ce66a202318
9,077
py
Python
orchestration/ci_helpers.py
danieldiamond/gitlab-analytics
f99e02c95c3a964b01cb14617a43cd5f64ecd88d
[ "MIT" ]
3
2021-07-22T06:44:31.000Z
2022-01-29T05:35:12.000Z
orchestration/ci_helpers.py
danieldiamond/gitlab-analytics
f99e02c95c3a964b01cb14617a43cd5f64ecd88d
[ "MIT" ]
null
null
null
orchestration/ci_helpers.py
danieldiamond/gitlab-analytics
f99e02c95c3a964b01cb14617a43cd5f64ecd88d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import pandas as pd import sys from atexit import register from fire import Fire from functools import partial from logging import info, error, basicConfig from os import environ as env, remove from subprocess import run, PIPE, Popen, _active, _cleanup from typing import Tuple @register def exit_cleanup(): """ Make sure that when the script exits it kills all subprocesses. """ for proc in _active: proc.kill() proc.communicate() _cleanup() run("kill -9 $(pgrep cloud_sql_proxy)", shell=True, stderr=PIPE) if not _active: info("Processes cleaned up.") else: error("Sub processes could not be cleaned up.") def auth_gcloud(bash_partial) -> None: """ Authenticate the gcloud service account. """ try: with open("gcp_credentials.json", "w") as file: file.write(env["GCP_SERVICE_CREDS"]) bash_partial( "gcloud auth activate-service-account --key-file=gcp_credentials.json" ) except IOError: error("Could not store GCP creds as a json file.") sys.exit(1) except: error("Could not authenticate service account.") sys.exit(1) info("Account successfully authenticated.") def find_sql_instance(gcp_instance_ref_slug) -> str: """ Find the gcp instance with the same ref slug, don't worry about the ID. """ if env["CI_COMMIT_REF_NAME"] == "master": return env["GCP_PRODUCTION_INSTANCE_NAME"] instance_list_raw = run( "gcloud sql instances list --project {}".format(env["GCP_PROJECT"]), stdout=PIPE, shell=True, check=True, ).stdout try: [instance_name] = [ instance_name for instance_row in instance_list_raw.decode("utf-8").split(" ") for instance_name in instance_row.split("\n") if gcp_instance_ref_slug in instance_name ] or [None] except: info("No instance found.") return instance_name def set_sql_instance() -> Tuple[str, str, str]: """ Create a sql instance using the slug and ci_job_id. Max length for instance name is 95, take away the length of the job_id and the two hyphens and that leaves 83 characters for the name. 
""" slug_length = 83 - len(env["CI_PROJECT_NAME"]) gcp_instance_ref_slug = "{}-{}".format( env["CI_PROJECT_NAME"], env["CI_COMMIT_REF_SLUG"][0:slug_length] ) job_gcp_instance_name = "{}-{}".format(gcp_instance_ref_slug, env["CI_JOB_ID"][:8]) instance_name = find_sql_instance(gcp_instance_ref_slug) info("Found instance with name: {}".format(instance_name)) return instance_name, job_gcp_instance_name, gcp_instance_ref_slug def async_run(command: str, instance: str) -> None: """ Run gcloud commands using the async flag and waiting for them to finish/fail. """ info("Running command using async: {}".format(command)) run(command + " --async", shell=True, check=True, stdout=PIPE) # Returns bytes with newlines, assume the first operation id is correct operation = ( run( "gcloud sql operations list --instance='{}' --filter='status!=DONE' --format='value(name)'".format( instance ), shell=True, check=True, stdout=PIPE, ) .stdout.decode("utf-8") .rstrip() ) info("Pending Operation: {}".format(operation)) info("Waiting for operation to finish/fail...") while True: status = ( run( 'sleep 10; gcloud sql operations describe "{}" --format="value(status)"'.format( operation ), shell=True, check=True, stdout=PIPE, ) .stdout.decode("utf-8") .rstrip() ) if status == "DONE": info(status) break else: info(status) def use_cloudsqlproxy(command: str) -> None: """ Execute a command while running the cloud sql proxy in the background. """ # Get the instance name and start the proxy instance_name, *_ = set_sql_instance() sql_proxy_command = "cloud_sql_proxy -instances={}:{}:{}=tcp:5432 -credential_file=gcp_credentials.json -verbose=False" sql_proxy = Popen( sql_proxy_command.format(env["GCP_PROJECT"], env["GCP_REGION"], instance_name), shell=True, ) info("Proxy is running.") run(command, shell=True, check=True) return def delete_review_cloudsql() -> None: """ Delete the cloudsql instance unless it is master (production). 
""" # Set the instance name and make sure it exists and isn't master instance_name, *_ = set_sql_instance() if not instance_name: error("This instance does not exist. Call manage_instances to create one.") sys.exit(1) if instance_name == env["GCP_PRODUCTION_INSTANCE_NAME"]: info("The branch name cannot match the production EDW instance name.") sys.exit(1) delete_instance_command = 'gcloud sql instances delete -q --project "{}" "{}"' run( delete_instance_command.format(env["GCP_PROJECT"], instance_name), shell=True, check=True, ) info("Instance Deleted.") def manage_review_cloudsql() -> None: """ Detemine whether to create, do nothing to, or clean up cloud instances. """ instance_name, job_gcp_instance_name, gcp_instance_ref_slug = set_sql_instance() # Check if script should force delete related instances if instance_name and env.get("FORCE") == "true": info("Cleaning up old sql instances.") list_instances_command = "gcloud sql instances list --project {} --filter {}" instance_list_raw = run( list_instances_command.format(env["GCP_PROJECT"], gcp_instance_ref_slug), shell=True, check=True, stdout=PIPE, ).stdout instance_list = [ instance_name for instance_row in instance_list_raw.decode("utf-8").split(" ") for instance_name in instance_row.split("\n") if env["CI_PROJECT_NAME"] in instance_name ] delete_instance_command = "gcloud sql instances delete -q --project {} {}" for instance in instance_list: info("Deleting instance: {}".format(instance)) run( delete_instance_command.format(env["GCP_PROJECT"], instance), shell=True, check=True, ) # If not forcing deletion and there is an instance, echo the name elif instance_name: info("Instance is available at: {}".format(instance_name)) return # If no instance existed or force deleted, create a new instance info("Cloning new instance {}".format(job_gcp_instance_name)) clone_instance_command = 'gcloud sql instances clone -q --project "{}" "{}" "{}"' async_run( clone_instance_command.format( env["GCP_PROJECT"], 
env["GCP_PRODUCTION_INSTANCE_NAME"], job_gcp_instance_name, ), job_gcp_instance_name, ) return def refresh_dev_cloudsql(): """ Update the dev instance. """ info("Restoring the dev instance from the latest successful prod backup.") # Dump a list of recent backups into a txt file run( "gcloud config set project {}".format(env["GCP_PROJECT"]), shell=True, check=True, ) backup_list_filename = "backup_list.txt" run( "gcloud sql backups list --instance {} > {}".format( env["GCP_PRODUCTION_INSTANCE_NAME"], backup_list_filename ), shell=True, check=True, stdout=PIPE, ).stdout # Get the most recent successful backup ID backup_df = pd.read_table("backup_list.txt", delim_whitespace=True).query( 'STATUS == "SUCCESSFUL"' ) backup_df["WINDOW_START_TIME"] = pd.to_datetime(backup_df["WINDOW_START_TIME"]) [backup_id] = backup_df.query("WINDOW_START_TIME == @newest_backup")["ID"] remove(backup_list_filename) # Trigger the dev instance refresh instance_refresh_command = 'gcloud sql backups restore {} -q --restore-instance="{}" --backup-instance="{}"' run( instance_refresh_command.format( backup_id, env["GCP_DEV_INSTANCE_NAME"], env["GCP_PRODUCTION_INSTANCE_NAME"] ), shell=True, check=True, ) if __name__ == "__main__": # Do some setup before running any Fire functions basicConfig(stream=sys.stdout, level=20) bash = partial(run, shell=True, check=True) auth_gcloud(bash) Fire( { "get_sql_instance": set_sql_instance, "use_proxy": use_cloudsqlproxy, "manage_instances": manage_review_cloudsql, "delete_instance": delete_review_cloudsql, "refresh_dev_instance": refresh_dev_cloudsql, } )
31.3
123
0.623334
import pandas as pd import sys from atexit import register from fire import Fire from functools import partial from logging import info, error, basicConfig from os import environ as env, remove from subprocess import run, PIPE, Popen, _active, _cleanup from typing import Tuple @register def exit_cleanup(): for proc in _active: proc.kill() proc.communicate() _cleanup() run("kill -9 $(pgrep cloud_sql_proxy)", shell=True, stderr=PIPE) if not _active: info("Processes cleaned up.") else: error("Sub processes could not be cleaned up.") def auth_gcloud(bash_partial) -> None: try: with open("gcp_credentials.json", "w") as file: file.write(env["GCP_SERVICE_CREDS"]) bash_partial( "gcloud auth activate-service-account --key-file=gcp_credentials.json" ) except IOError: error("Could not store GCP creds as a json file.") sys.exit(1) except: error("Could not authenticate service account.") sys.exit(1) info("Account successfully authenticated.") def find_sql_instance(gcp_instance_ref_slug) -> str: if env["CI_COMMIT_REF_NAME"] == "master": return env["GCP_PRODUCTION_INSTANCE_NAME"] instance_list_raw = run( "gcloud sql instances list --project {}".format(env["GCP_PROJECT"]), stdout=PIPE, shell=True, check=True, ).stdout try: [instance_name] = [ instance_name for instance_row in instance_list_raw.decode("utf-8").split(" ") for instance_name in instance_row.split("\n") if gcp_instance_ref_slug in instance_name ] or [None] except: info("No instance found.") return instance_name def set_sql_instance() -> Tuple[str, str, str]: slug_length = 83 - len(env["CI_PROJECT_NAME"]) gcp_instance_ref_slug = "{}-{}".format( env["CI_PROJECT_NAME"], env["CI_COMMIT_REF_SLUG"][0:slug_length] ) job_gcp_instance_name = "{}-{}".format(gcp_instance_ref_slug, env["CI_JOB_ID"][:8]) instance_name = find_sql_instance(gcp_instance_ref_slug) info("Found instance with name: {}".format(instance_name)) return instance_name, job_gcp_instance_name, gcp_instance_ref_slug def async_run(command: str, instance: str) -> 
None: info("Running command using async: {}".format(command)) run(command + " --async", shell=True, check=True, stdout=PIPE) operation = ( run( "gcloud sql operations list --instance='{}' --filter='status!=DONE' --format='value(name)'".format( instance ), shell=True, check=True, stdout=PIPE, ) .stdout.decode("utf-8") .rstrip() ) info("Pending Operation: {}".format(operation)) info("Waiting for operation to finish/fail...") while True: status = ( run( 'sleep 10; gcloud sql operations describe "{}" --format="value(status)"'.format( operation ), shell=True, check=True, stdout=PIPE, ) .stdout.decode("utf-8") .rstrip() ) if status == "DONE": info(status) break else: info(status) def use_cloudsqlproxy(command: str) -> None: instance_name, *_ = set_sql_instance() sql_proxy_command = "cloud_sql_proxy -instances={}:{}:{}=tcp:5432 -credential_file=gcp_credentials.json -verbose=False" sql_proxy = Popen( sql_proxy_command.format(env["GCP_PROJECT"], env["GCP_REGION"], instance_name), shell=True, ) info("Proxy is running.") run(command, shell=True, check=True) return def delete_review_cloudsql() -> None: instance_name, *_ = set_sql_instance() if not instance_name: error("This instance does not exist. 
Call manage_instances to create one.") sys.exit(1) if instance_name == env["GCP_PRODUCTION_INSTANCE_NAME"]: info("The branch name cannot match the production EDW instance name.") sys.exit(1) delete_instance_command = 'gcloud sql instances delete -q --project "{}" "{}"' run( delete_instance_command.format(env["GCP_PROJECT"], instance_name), shell=True, check=True, ) info("Instance Deleted.") def manage_review_cloudsql() -> None: instance_name, job_gcp_instance_name, gcp_instance_ref_slug = set_sql_instance() # Check if script should force delete related instances if instance_name and env.get("FORCE") == "true": info("Cleaning up old sql instances.") list_instances_command = "gcloud sql instances list --project {} --filter {}" instance_list_raw = run( list_instances_command.format(env["GCP_PROJECT"], gcp_instance_ref_slug), shell=True, check=True, stdout=PIPE, ).stdout instance_list = [ instance_name for instance_row in instance_list_raw.decode("utf-8").split(" ") for instance_name in instance_row.split("\n") if env["CI_PROJECT_NAME"] in instance_name ] delete_instance_command = "gcloud sql instances delete -q --project {} {}" for instance in instance_list: info("Deleting instance: {}".format(instance)) run( delete_instance_command.format(env["GCP_PROJECT"], instance), shell=True, check=True, ) # If not forcing deletion and there is an instance, echo the name elif instance_name: info("Instance is available at: {}".format(instance_name)) return # If no instance existed or force deleted, create a new instance info("Cloning new instance {}".format(job_gcp_instance_name)) clone_instance_command = 'gcloud sql instances clone -q --project "{}" "{}" "{}"' async_run( clone_instance_command.format( env["GCP_PROJECT"], env["GCP_PRODUCTION_INSTANCE_NAME"], job_gcp_instance_name, ), job_gcp_instance_name, ) return def refresh_dev_cloudsql(): info("Restoring the dev instance from the latest successful prod backup.") # Dump a list of recent backups into a txt file run( "gcloud 
config set project {}".format(env["GCP_PROJECT"]), shell=True, check=True, ) backup_list_filename = "backup_list.txt" run( "gcloud sql backups list --instance {} > {}".format( env["GCP_PRODUCTION_INSTANCE_NAME"], backup_list_filename ), shell=True, check=True, stdout=PIPE, ).stdout # Get the most recent successful backup ID backup_df = pd.read_table("backup_list.txt", delim_whitespace=True).query( 'STATUS == "SUCCESSFUL"' ) backup_df["WINDOW_START_TIME"] = pd.to_datetime(backup_df["WINDOW_START_TIME"]) [backup_id] = backup_df.query("WINDOW_START_TIME == @newest_backup")["ID"] remove(backup_list_filename) # Trigger the dev instance refresh instance_refresh_command = 'gcloud sql backups restore {} -q --restore-instance="{}" --backup-instance="{}"' run( instance_refresh_command.format( backup_id, env["GCP_DEV_INSTANCE_NAME"], env["GCP_PRODUCTION_INSTANCE_NAME"] ), shell=True, check=True, ) if __name__ == "__main__": # Do some setup before running any Fire functions basicConfig(stream=sys.stdout, level=20) bash = partial(run, shell=True, check=True) auth_gcloud(bash) Fire( { "get_sql_instance": set_sql_instance, "use_proxy": use_cloudsqlproxy, "manage_instances": manage_review_cloudsql, "delete_instance": delete_review_cloudsql, "refresh_dev_instance": refresh_dev_cloudsql, } )
true
true
1c3551b6915948fa8b32f6f08edf1841ec84fe43
1,511
py
Python
snippet/templatetags/snippet.py
RealGeeks/django-snippet
75f20e69902ba78bda6746b1ac7e8885fbfff698
[ "MIT" ]
null
null
null
snippet/templatetags/snippet.py
RealGeeks/django-snippet
75f20e69902ba78bda6746b1ac7e8885fbfff698
[ "MIT" ]
null
null
null
snippet/templatetags/snippet.py
RealGeeks/django-snippet
75f20e69902ba78bda6746b1ac7e8885fbfff698
[ "MIT" ]
1
2020-11-07T10:22:48.000Z
2020-11-07T10:22:48.000Z
from django import template from ..models import Snippet register = template.Library() default_snippet = { "default": '"New Snippet"', "safe": True, } class snippet_node(template.Node): def __init__(self, name, default): self.name = template.Variable(name) self.default = default def render(self, context): name = self.name.resolve(context) default = self.default.render(context) return Snippet.cache.get(name, default) @register.tag def snippet(parser, token): args, kwargs = interpret_args( token.split_contents(), default=default_snippet, ) default = fake_nodelist(kwargs['default']) name = args[0] return snippet_node(name, default) @register.tag def snippetblock(parser, token): args, kwargs = interpret_args( token.split_contents(), default=default_snippet, ) name = args[0] default = parser.parse(('endsnippetblock',)) parser.delete_first_token() return snippet_node(name, default) def interpret_args(token_args, default): args = [] kwargs = dict(default) for token in token_args[1:]: if '=' in token: if token[0] in ('\'', '"'): args.append(token) else: key, value = token.split('=',1) if key in kwargs: kwargs[key] = value else: args.append(token) if not len(args): raise template.TemplateSyntaxError("Snippetblock needs an ID") return args, kwargs class fake_nodelist(object): def __init__(self, content): self.content = template.Variable(content) def render(self, context): return self.content.resolve(context)
23.246154
64
0.712111
from django import template from ..models import Snippet register = template.Library() default_snippet = { "default": '"New Snippet"', "safe": True, } class snippet_node(template.Node): def __init__(self, name, default): self.name = template.Variable(name) self.default = default def render(self, context): name = self.name.resolve(context) default = self.default.render(context) return Snippet.cache.get(name, default) @register.tag def snippet(parser, token): args, kwargs = interpret_args( token.split_contents(), default=default_snippet, ) default = fake_nodelist(kwargs['default']) name = args[0] return snippet_node(name, default) @register.tag def snippetblock(parser, token): args, kwargs = interpret_args( token.split_contents(), default=default_snippet, ) name = args[0] default = parser.parse(('endsnippetblock',)) parser.delete_first_token() return snippet_node(name, default) def interpret_args(token_args, default): args = [] kwargs = dict(default) for token in token_args[1:]: if '=' in token: if token[0] in ('\'', '"'): args.append(token) else: key, value = token.split('=',1) if key in kwargs: kwargs[key] = value else: args.append(token) if not len(args): raise template.TemplateSyntaxError("Snippetblock needs an ID") return args, kwargs class fake_nodelist(object): def __init__(self, content): self.content = template.Variable(content) def render(self, context): return self.content.resolve(context)
true
true
1c3551ee0553f10a72ff85a89b3a75edbe3228ac
2,630
py
Python
plugins/modules/snmp_property.py
robertcsapo/dnacenter-ansible
33f776f8c0bc7113da73191c301dd1807e6b4a43
[ "MIT" ]
null
null
null
plugins/modules/snmp_property.py
robertcsapo/dnacenter-ansible
33f776f8c0bc7113da73191c301dd1807e6b4a43
[ "MIT" ]
null
null
null
plugins/modules/snmp_property.py
robertcsapo/dnacenter-ansible
33f776f8c0bc7113da73191c301dd1807e6b4a43
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2021, Cisco Systems # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) DOCUMENTATION = r""" --- module: snmp_property short_description: Manage SnmpProperty objects of Discovery description: - Returns SNMP properties. - Adds SNMP properties. version_added: '1.0.0' author: Rafael Campos (@racampos) options: payload: description: - An object to send in the Request body. - Required for state create. type: list elements: dict suboptions: id: description: - It is the snmp property's id. type: str instanceTenantId: description: - It is the snmp property's instanceTenantId. type: str instanceUuid: description: - It is the snmp property's instanceUuid. type: str intValue: description: - It is the snmp property's intValue. type: int systemPropertyName: description: - It is the snmp property's systemPropertyName. type: str requirements: - dnacentersdk seealso: # Reference by module name - module: cisco.dnac.plugins.module_utils.definitions.snmp_property # Reference by Internet resource - name: SnmpProperty reference description: Complete reference of the SnmpProperty object model. link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x # Reference by Internet resource - name: SnmpProperty reference description: SDK reference. 
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary """ EXAMPLES = r""" - name: get_snmp_properties cisco.dnac.snmp_property: state: query # required register: nm_get_snmp_properties - name: create_update_snmp_properties cisco.dnac.snmp_property: state: create # required payload: # required - id: SomeValue # string instanceTenantId: SomeValue # string instanceUuid: SomeValue # string intValue: 1 # integer systemPropertyName: SomeValue # string """ RETURN = r""" dnac_response: description: A dictionary with the response returned by the DNA Center Python SDK returned: always type: dict sample: {"response": 29, "version": "1.0"} sdk_function: description: The DNA Center SDK function used to execute the task returned: always type: str sample: discovery.create_update_snmp_properties missing_params: description: Provided arguments do not comply with the schema of the DNA Center Python SDK function returned: when the function request schema is not satisfied type: list sample: """
27.395833
101
0.702281
DOCUMENTATION = r""" --- module: snmp_property short_description: Manage SnmpProperty objects of Discovery description: - Returns SNMP properties. - Adds SNMP properties. version_added: '1.0.0' author: Rafael Campos (@racampos) options: payload: description: - An object to send in the Request body. - Required for state create. type: list elements: dict suboptions: id: description: - It is the snmp property's id. type: str instanceTenantId: description: - It is the snmp property's instanceTenantId. type: str instanceUuid: description: - It is the snmp property's instanceUuid. type: str intValue: description: - It is the snmp property's intValue. type: int systemPropertyName: description: - It is the snmp property's systemPropertyName. type: str requirements: - dnacentersdk seealso: # Reference by module name - module: cisco.dnac.plugins.module_utils.definitions.snmp_property # Reference by Internet resource - name: SnmpProperty reference description: Complete reference of the SnmpProperty object model. link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x # Reference by Internet resource - name: SnmpProperty reference description: SDK reference. 
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary """ EXAMPLES = r""" - name: get_snmp_properties cisco.dnac.snmp_property: state: query # required register: nm_get_snmp_properties - name: create_update_snmp_properties cisco.dnac.snmp_property: state: create # required payload: # required - id: SomeValue # string instanceTenantId: SomeValue # string instanceUuid: SomeValue # string intValue: 1 # integer systemPropertyName: SomeValue # string """ RETURN = r""" dnac_response: description: A dictionary with the response returned by the DNA Center Python SDK returned: always type: dict sample: {"response": 29, "version": "1.0"} sdk_function: description: The DNA Center SDK function used to execute the task returned: always type: str sample: discovery.create_update_snmp_properties missing_params: description: Provided arguments do not comply with the schema of the DNA Center Python SDK function returned: when the function request schema is not satisfied type: list sample: """
true
true
1c35527632154c510f110f3ebe027e62bcd3c139
9,322
py
Python
WWW/pycopia/WWW/website.py
kdart/pycopia3
8a7c820f096245411eabbb72345e4f30a35988b6
[ "Apache-2.0" ]
3
2018-11-26T15:00:20.000Z
2022-01-28T23:17:58.000Z
WWW/pycopia/WWW/website.py
kdart/pycopia3
8a7c820f096245411eabbb72345e4f30a35988b6
[ "Apache-2.0" ]
null
null
null
WWW/pycopia/WWW/website.py
kdart/pycopia3
8a7c820f096245411eabbb72345e4f30a35988b6
[ "Apache-2.0" ]
1
2018-11-26T15:00:21.000Z
2018-11-26T15:00:21.000Z
#!/usr/bin/python3.4 # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Master web site control. Handles virtual host setups using lighttpd as a front-end. This controller also handles the lighttpd process itself. So if you have it enabled in your site's init.d system you should disable it if they are configured to listen on the same port. The lighttpd server is run under the pycopia process manager and will be automatically restarted if it abnormally exits. The pycopia web framework provides its own configuration file. Changes made to any installed lighttpd configuration won't be used. The lighttpd build was configured like this: ./configure --prefix=/usr --with-fam --with-openssl --with-attr --with-pcre --with-zlib --disable-ipv6 """ import sys import os import socket import traceback from pycopia import basicconfig from pycopia import logging from pycopia import passwd from pycopia import proctools from pycopia.OS import procutils LTCONFIG = "/etc/pycopia/lighttpd/lighttpd.conf" # Master site config. controls all virtual host configuration. 
SITE_CONFIG = "/etc/pycopia/website.conf" LIGHTTPD = procutils.which("lighttpd") def start(config): setup(config) if config.DAEMON: from pycopia import daemonize from pycopia import logfile lf = logfile.ManagedStdio(config.LOGFILENAME) daemonize.daemonize(lf, pidfile=config.PIDFILE) else: lf = sys.stderr with open(config.PIDFILE, "w") as fo: fo.write("{}\n".format(os.getpid())) start_proc_manager(config, lf) def setup(config): siteuser = passwd.getpwnam(config.SITEUSER) siteowner = passwd.getpwnam(config.SITEOWNER) logroot = config.get("LOGROOT", "/var/log/lighttpd") fqdn = socket.getfqdn() def _mkdir(path): if not os.path.isdir(path): os.mkdir(path, 0o755) os.chown(path, siteowner.uid, siteowner.gid) for vhost in config.VHOSTS.keys(): vhostdir = config.SITEROOT + "/" + vhost vhostlogdir = logroot + "/" + vhost if not os.path.isdir(vhostlogdir): os.mkdir(vhostlogdir, 0o755) os.chown(vhostlogdir, siteuser.uid, siteuser.gid) if not os.path.isdir(vhostdir): if fqdn == vhost: os.symlink(config.SITEROOT + "/localhost", vhostdir) else: _mkdir(vhostdir) _mkdir(vhostdir + "/htdocs") _mkdir(vhostdir + "/htdocs-secure") _mkdir(vhostdir + "/static") _mkdir(vhostdir + "/media") _mkdir(vhostdir + "/media/js") _mkdir(vhostdir + "/media/css") _mkdir(vhostdir + "/media/images") def start_proc_manager(config, logfile): from pycopia import asyncio pm = proctools.get_procmanager() libexec = config.get("LIBEXEC", "/usr/libexec/pycopia") for name, serverlist in list(config.VHOSTS.items()): for servername in serverlist: logging.info("Starting {} for vhost {}.".format(servername, name)) cmd = "{}/scgi_server -n {}".format(libexec, servername) p = pm.spawnprocess( ServerProcess, cmd, persistent=True, logfile=logfile) asyncio.poller.register(p) if config.USEFRONTEND: if asyncio.poller: pm.spawnpipe("{} -D -f {}".format(LIGHTTPD, LTCONFIG), persistent=True, logfile=logfile) else: # no servers, just run frontend alone pm.spawnpipe("{} -f {}".format(LIGHTTPD, LTCONFIG)) try: 
asyncio.poller.loop() logging.info("No servers, exited loop.") except KeyboardInterrupt: pass if asyncio.poller: asyncio.poller.unregister_all() for proc in pm.getprocs(): proc.killwait() if os.path.exists(config.PIDFILE): os.unlink(config.PIDFILE) class ServerProcess(proctools.ProcessPipe): def __init__(self, cmd, debug=False, **kwargs): super().__init__(cmd, **kwargs) self._debug = debug def exception_handler(self, ex, val, tb): traceback.print_exception(ex, val, tb, file=self._log) def stop(config): import signal if os.path.exists(config.PIDFILE): pid = int(open(config.PIDFILE).read().strip()) os.kill(pid, signal.SIGINT) def status(config): from pycopia.OS import procfs if os.path.exists(config.PIDFILE): pid = int(open(config.PIDFILE).read().strip()) s = procfs.ProcStat(pid) if s and s.command.find(config.SERVERNAME) >= 0: print("Process manager running: pid %s: %s." % (pid, s.cmdline)) return 0 print("Process manager not running.") return 1 def robots(config): user = passwd.getpwnam(config.SITEOWNER) for vhost, scripts in list(config.VHOSTS.items()): rname = os.path.join(config.SITEROOT, vhost, "htdocs", "robots.txt") if os.path.exists(rname): if config.FORCE: bakname = rname + ".bak" if os.path.exists(bakname): os.unlink(bakname) os.rename(rname, bakname) else: continue with open(rname, "w") as fo: fo.write(_get_robots_txt(scripts)) os.chown(rname, user.uid, user.gid) def check(config): "Check the lighttpd configuration." 
pm = proctools.get_procmanager() cmd = "{} -p -f {}".format(LIGHTTPD, LTCONFIG) print("Running:", cmd) proc = pm.spawnpipe(cmd) out = proc.read() es = proc.wait() if es: sys.stdout.buffer.write(out) else: from pycopia.WWW import serverconfig print("ERROR: {}".format(es)) sys.stdout.buffer.write(out) print("config_server output:") serverconfig.config_lighttpd(["config_lighttpd"], sys.stdout) def _get_robots_txt(scripts): s = ["User-agent: *"] for name in scripts: s.append("Disallow: /%s" % (name,)) s.append("") return "\n".join(s) # Don't use a docstring since server is run in optimized mode. _doc = """Pycopia server controller. {progname} [-?hnN] [-l <logfilename>] [-p <pidfilename>] [<command>] Options: -? or -h Show this help. -l override log file name. -p override pid file name. -F force actions, such as overwriting files. -n do NOT become a daemon when starting. -d Enable automatic debugging. -N do NOT start the web server front end (lighttpd). -f <cffile> Override config file to use. -D <fqdn> Override FQDN config variable. Where command is one of: setup - create directory structures according to config file entries. start - start all web services and virtual hosts stop - stop a running server status - status of server robots - update robots.txt files. check - Emit the generated lighttpd config, so you can check it. 
""" def main(argv): import getopt daemonize = True frontend = True force = False domainname = None servername = os.path.basename(argv[0]) logfilename = "/var/log/{}.log".format(servername) pidfilename = "/run/{}.pid".format(servername) cffile = SITE_CONFIG try: optlist, args = getopt.getopt(argv[1:], "?hdnNFl:p:f:D:") except getopt.GetoptError: print(_doc.format(progname=servername)) return for opt, optarg in optlist: if opt in ("-?", "-h"): print(_doc.format(progname=servername)) return 2 elif opt == "-l": logfilename = optarg elif opt == "-n": daemonize = False elif opt == "-N": frontend = False elif opt == "-D": domainname = optarg elif opt == "-f": cffile = optarg elif opt == "-F": force = True elif opt == "-p": pidfilename = optarg elif opt == "-d": from pycopia import autodebug # noqa FQDN = domainname or socket.getfqdn() config = basicconfig.get_config(cffile, FQDN=FQDN) config.SERVERNAME = servername config.LOGFILENAME = logfilename config.PIDFILE = pidfilename config.DAEMON = daemonize config.FORCE = force config.USEFRONTEND = frontend config.ARGV = args if not args: return status(config) cmd = args[0] if cmd.startswith("stat"): return status(config) elif cmd.startswith("set"): return setup(config) elif cmd.startswith("star"): return start(config) elif cmd.startswith("stop"): return stop(config) elif cmd.startswith("rob"): return robots(config) elif cmd.startswith("che"): return check(config) else: print(_doc.format(progname=servername)) return 2
31.6
78
0.63141
import sys import os import socket import traceback from pycopia import basicconfig from pycopia import logging from pycopia import passwd from pycopia import proctools from pycopia.OS import procutils LTCONFIG = "/etc/pycopia/lighttpd/lighttpd.conf" SITE_CONFIG = "/etc/pycopia/website.conf" LIGHTTPD = procutils.which("lighttpd") def start(config): setup(config) if config.DAEMON: from pycopia import daemonize from pycopia import logfile lf = logfile.ManagedStdio(config.LOGFILENAME) daemonize.daemonize(lf, pidfile=config.PIDFILE) else: lf = sys.stderr with open(config.PIDFILE, "w") as fo: fo.write("{}\n".format(os.getpid())) start_proc_manager(config, lf) def setup(config): siteuser = passwd.getpwnam(config.SITEUSER) siteowner = passwd.getpwnam(config.SITEOWNER) logroot = config.get("LOGROOT", "/var/log/lighttpd") fqdn = socket.getfqdn() def _mkdir(path): if not os.path.isdir(path): os.mkdir(path, 0o755) os.chown(path, siteowner.uid, siteowner.gid) for vhost in config.VHOSTS.keys(): vhostdir = config.SITEROOT + "/" + vhost vhostlogdir = logroot + "/" + vhost if not os.path.isdir(vhostlogdir): os.mkdir(vhostlogdir, 0o755) os.chown(vhostlogdir, siteuser.uid, siteuser.gid) if not os.path.isdir(vhostdir): if fqdn == vhost: os.symlink(config.SITEROOT + "/localhost", vhostdir) else: _mkdir(vhostdir) _mkdir(vhostdir + "/htdocs") _mkdir(vhostdir + "/htdocs-secure") _mkdir(vhostdir + "/static") _mkdir(vhostdir + "/media") _mkdir(vhostdir + "/media/js") _mkdir(vhostdir + "/media/css") _mkdir(vhostdir + "/media/images") def start_proc_manager(config, logfile): from pycopia import asyncio pm = proctools.get_procmanager() libexec = config.get("LIBEXEC", "/usr/libexec/pycopia") for name, serverlist in list(config.VHOSTS.items()): for servername in serverlist: logging.info("Starting {} for vhost {}.".format(servername, name)) cmd = "{}/scgi_server -n {}".format(libexec, servername) p = pm.spawnprocess( ServerProcess, cmd, persistent=True, logfile=logfile) 
asyncio.poller.register(p) if config.USEFRONTEND: if asyncio.poller: pm.spawnpipe("{} -D -f {}".format(LIGHTTPD, LTCONFIG), persistent=True, logfile=logfile) else: pm.spawnpipe("{} -f {}".format(LIGHTTPD, LTCONFIG)) try: asyncio.poller.loop() logging.info("No servers, exited loop.") except KeyboardInterrupt: pass if asyncio.poller: asyncio.poller.unregister_all() for proc in pm.getprocs(): proc.killwait() if os.path.exists(config.PIDFILE): os.unlink(config.PIDFILE) class ServerProcess(proctools.ProcessPipe): def __init__(self, cmd, debug=False, **kwargs): super().__init__(cmd, **kwargs) self._debug = debug def exception_handler(self, ex, val, tb): traceback.print_exception(ex, val, tb, file=self._log) def stop(config): import signal if os.path.exists(config.PIDFILE): pid = int(open(config.PIDFILE).read().strip()) os.kill(pid, signal.SIGINT) def status(config): from pycopia.OS import procfs if os.path.exists(config.PIDFILE): pid = int(open(config.PIDFILE).read().strip()) s = procfs.ProcStat(pid) if s and s.command.find(config.SERVERNAME) >= 0: print("Process manager running: pid %s: %s." 
% (pid, s.cmdline)) return 0 print("Process manager not running.") return 1 def robots(config): user = passwd.getpwnam(config.SITEOWNER) for vhost, scripts in list(config.VHOSTS.items()): rname = os.path.join(config.SITEROOT, vhost, "htdocs", "robots.txt") if os.path.exists(rname): if config.FORCE: bakname = rname + ".bak" if os.path.exists(bakname): os.unlink(bakname) os.rename(rname, bakname) else: continue with open(rname, "w") as fo: fo.write(_get_robots_txt(scripts)) os.chown(rname, user.uid, user.gid) def check(config): pm = proctools.get_procmanager() cmd = "{} -p -f {}".format(LIGHTTPD, LTCONFIG) print("Running:", cmd) proc = pm.spawnpipe(cmd) out = proc.read() es = proc.wait() if es: sys.stdout.buffer.write(out) else: from pycopia.WWW import serverconfig print("ERROR: {}".format(es)) sys.stdout.buffer.write(out) print("config_server output:") serverconfig.config_lighttpd(["config_lighttpd"], sys.stdout) def _get_robots_txt(scripts): s = ["User-agent: *"] for name in scripts: s.append("Disallow: /%s" % (name,)) s.append("") return "\n".join(s) _doc = """Pycopia server controller. {progname} [-?hnN] [-l <logfilename>] [-p <pidfilename>] [<command>] Options: -? or -h Show this help. -l override log file name. -p override pid file name. -F force actions, such as overwriting files. -n do NOT become a daemon when starting. -d Enable automatic debugging. -N do NOT start the web server front end (lighttpd). -f <cffile> Override config file to use. -D <fqdn> Override FQDN config variable. Where command is one of: setup - create directory structures according to config file entries. start - start all web services and virtual hosts stop - stop a running server status - status of server robots - update robots.txt files. check - Emit the generated lighttpd config, so you can check it. 
""" def main(argv): import getopt daemonize = True frontend = True force = False domainname = None servername = os.path.basename(argv[0]) logfilename = "/var/log/{}.log".format(servername) pidfilename = "/run/{}.pid".format(servername) cffile = SITE_CONFIG try: optlist, args = getopt.getopt(argv[1:], "?hdnNFl:p:f:D:") except getopt.GetoptError: print(_doc.format(progname=servername)) return for opt, optarg in optlist: if opt in ("-?", "-h"): print(_doc.format(progname=servername)) return 2 elif opt == "-l": logfilename = optarg elif opt == "-n": daemonize = False elif opt == "-N": frontend = False elif opt == "-D": domainname = optarg elif opt == "-f": cffile = optarg elif opt == "-F": force = True elif opt == "-p": pidfilename = optarg elif opt == "-d": from pycopia import autodebug # noqa FQDN = domainname or socket.getfqdn() config = basicconfig.get_config(cffile, FQDN=FQDN) config.SERVERNAME = servername config.LOGFILENAME = logfilename config.PIDFILE = pidfilename config.DAEMON = daemonize config.FORCE = force config.USEFRONTEND = frontend config.ARGV = args if not args: return status(config) cmd = args[0] if cmd.startswith("stat"): return status(config) elif cmd.startswith("set"): return setup(config) elif cmd.startswith("star"): return start(config) elif cmd.startswith("stop"): return stop(config) elif cmd.startswith("rob"): return robots(config) elif cmd.startswith("che"): return check(config) else: print(_doc.format(progname=servername)) return 2
true
true
1c3552d4de8a30c043890bd176251ff14e9a9c7f
10,537
py
Python
keystone-moon/keystone/token/providers/fernet/utils.py
hashnfv/hashnfv-moon
daaba34fa2ed4426bc0fde359e54a5e1b872208c
[ "Apache-2.0" ]
null
null
null
keystone-moon/keystone/token/providers/fernet/utils.py
hashnfv/hashnfv-moon
daaba34fa2ed4426bc0fde359e54a5e1b872208c
[ "Apache-2.0" ]
null
null
null
keystone-moon/keystone/token/providers/fernet/utils.py
hashnfv/hashnfv-moon
daaba34fa2ed4426bc0fde359e54a5e1b872208c
[ "Apache-2.0" ]
1
2021-03-21T11:38:30.000Z
2021-03-21T11:38:30.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import stat from cryptography import fernet from oslo_config import cfg from oslo_log import log from keystone.i18n import _LE, _LW, _LI LOG = log.getLogger(__name__) CONF = cfg.CONF def validate_key_repository(requires_write=False): """Validate permissions on the key repository directory.""" # NOTE(lbragstad): We shouldn't need to check if the directory was passed # in as None because we don't set allow_no_values to True. # ensure current user has sufficient access to the key repository is_valid = (os.access(CONF.fernet_tokens.key_repository, os.R_OK) and os.access(CONF.fernet_tokens.key_repository, os.X_OK)) if requires_write: is_valid = (is_valid and os.access(CONF.fernet_tokens.key_repository, os.W_OK)) if not is_valid: LOG.error( _LE('Either [fernet_tokens] key_repository does not exist or ' 'Keystone does not have sufficient permission to access it: ' '%s'), CONF.fernet_tokens.key_repository) else: # ensure the key repository isn't world-readable stat_info = os.stat(CONF.fernet_tokens.key_repository) if(stat_info.st_mode & stat.S_IROTH or stat_info.st_mode & stat.S_IXOTH): LOG.warning(_LW( '[fernet_tokens] key_repository is world readable: %s'), CONF.fernet_tokens.key_repository) return is_valid def _convert_to_integers(id_value): """Cast user and group system identifiers to integers.""" # NOTE(lbragstad) os.chown() will raise a TypeError here if # keystone_user_id and keystone_group_id are not integers. 
Let's # cast them to integers if we can because it's possible to pass non-integer # values into the fernet_setup utility. try: id_int = int(id_value) except ValueError as e: msg = _LE('Unable to convert Keystone user or group ID. Error: %s') LOG.error(msg, e) raise return id_int def create_key_directory(keystone_user_id=None, keystone_group_id=None): """If the configured key directory does not exist, attempt to create it.""" if not os.access(CONF.fernet_tokens.key_repository, os.F_OK): LOG.info(_LI( '[fernet_tokens] key_repository does not appear to exist; ' 'attempting to create it')) try: os.makedirs(CONF.fernet_tokens.key_repository, 0o700) except OSError: LOG.error(_LE( 'Failed to create [fernet_tokens] key_repository: either it ' 'already exists or you don\'t have sufficient permissions to ' 'create it')) if keystone_user_id and keystone_group_id: os.chown( CONF.fernet_tokens.key_repository, keystone_user_id, keystone_group_id) elif keystone_user_id or keystone_group_id: LOG.warning(_LW( 'Unable to change the ownership of [fernet_tokens] ' 'key_repository without a keystone user ID and keystone group ' 'ID both being provided: %s') % CONF.fernet_tokens.key_repository) def _create_new_key(keystone_user_id, keystone_group_id): """Securely create a new encryption key. Create a new key that is readable by the Keystone group and Keystone user. 
""" key = fernet.Fernet.generate_key() # key is bytes # This ensures the key created is not world-readable old_umask = os.umask(0o177) if keystone_user_id and keystone_group_id: old_egid = os.getegid() old_euid = os.geteuid() os.setegid(keystone_group_id) os.seteuid(keystone_user_id) elif keystone_user_id or keystone_group_id: LOG.warning(_LW( 'Unable to change the ownership of the new key without a keystone ' 'user ID and keystone group ID both being provided: %s') % CONF.fernet_tokens.key_repository) # Determine the file name of the new key key_file = os.path.join(CONF.fernet_tokens.key_repository, '0') try: with open(key_file, 'w') as f: f.write(key.decode('utf-8')) # convert key to str for the file. finally: # After writing the key, set the umask back to it's original value. Do # the same with group and user identifiers if a Keystone group or user # was supplied. os.umask(old_umask) if keystone_user_id and keystone_group_id: os.seteuid(old_euid) os.setegid(old_egid) LOG.info(_LI('Created a new key: %s'), key_file) def initialize_key_repository(keystone_user_id=None, keystone_group_id=None): """Create a key repository and bootstrap it with a key. :param keystone_user_id: User ID of the Keystone user. :param keystone_group_id: Group ID of the Keystone user. """ # make sure we have work to do before proceeding if os.access(os.path.join(CONF.fernet_tokens.key_repository, '0'), os.F_OK): LOG.info(_LI('Key repository is already initialized; aborting.')) return # bootstrap an existing key _create_new_key(keystone_user_id, keystone_group_id) # ensure that we end up with a primary and secondary key rotate_keys(keystone_user_id, keystone_group_id) def rotate_keys(keystone_user_id=None, keystone_group_id=None): """Create a new primary key and revoke excess active keys. :param keystone_user_id: User ID of the Keystone user. :param keystone_group_id: Group ID of the Keystone user. 
Key rotation utilizes the following behaviors: - The highest key number is used as the primary key (used for encryption). - All keys can be used for decryption. - New keys are always created as key "0," which serves as a placeholder before promoting it to be the primary key. This strategy allows you to safely perform rotation on one node in a cluster, before syncing the results of the rotation to all other nodes (during both key rotation and synchronization, all nodes must recognize all primary keys). """ # read the list of key files key_files = dict() for filename in os.listdir(CONF.fernet_tokens.key_repository): path = os.path.join(CONF.fernet_tokens.key_repository, str(filename)) if os.path.isfile(path): try: key_id = int(filename) except ValueError: # nosec : name isn't a number, ignore the file. pass else: key_files[key_id] = path LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), { 'count': len(key_files), 'list': list(key_files.values())}) # determine the number of the new primary key current_primary_key = max(key_files.keys()) LOG.info(_LI('Current primary key is: %s'), current_primary_key) new_primary_key = current_primary_key + 1 LOG.info(_LI('Next primary key will be: %s'), new_primary_key) # promote the next primary key to be the primary os.rename( os.path.join(CONF.fernet_tokens.key_repository, '0'), os.path.join(CONF.fernet_tokens.key_repository, str(new_primary_key))) key_files.pop(0) key_files[new_primary_key] = os.path.join( CONF.fernet_tokens.key_repository, str(new_primary_key)) LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key) # add a new key to the rotation, which will be the *next* primary _create_new_key(keystone_user_id, keystone_group_id) max_active_keys = CONF.fernet_tokens.max_active_keys # check for bad configuration if max_active_keys < 1: LOG.warning(_LW( '[fernet_tokens] max_active_keys must be at least 1 to maintain a ' 'primary key.')) max_active_keys = 1 # purge excess keys # Note that 
key_files doesn't contain the new active key that was created, # only the old active keys. keys = sorted(key_files.keys(), reverse=True) while len(keys) > (max_active_keys - 1): index_to_purge = keys.pop() key_to_purge = key_files[index_to_purge] LOG.info(_LI('Excess key to purge: %s'), key_to_purge) os.remove(key_to_purge) def load_keys(): """Load keys from disk into a list. The first key in the list is the primary key used for encryption. All other keys are active secondary keys that can be used for decrypting tokens. """ if not validate_key_repository(): return [] # build a dictionary of key_number:encryption_key pairs keys = dict() for filename in os.listdir(CONF.fernet_tokens.key_repository): path = os.path.join(CONF.fernet_tokens.key_repository, str(filename)) if os.path.isfile(path): with open(path, 'r') as key_file: try: key_id = int(filename) except ValueError: # nosec : filename isn't a number, ignore # this file since it's not a key. pass else: keys[key_id] = key_file.read() if len(keys) != CONF.fernet_tokens.max_active_keys: # If there haven't been enough key rotations to reach max_active_keys, # or if the configured value of max_active_keys has changed since the # last rotation, then reporting the discrepancy might be useful. Once # the number of keys matches max_active_keys, this log entry is too # repetitive to be useful. LOG.info(_LI( 'Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: ' '%(dir)s'), { 'count': len(keys), 'max': CONF.fernet_tokens.max_active_keys, 'dir': CONF.fernet_tokens.key_repository}) # return the encryption_keys, sorted by key number, descending return [keys[x] for x in sorted(keys.keys(), reverse=True)]
38.881919
79
0.662617
import os import stat from cryptography import fernet from oslo_config import cfg from oslo_log import log from keystone.i18n import _LE, _LW, _LI LOG = log.getLogger(__name__) CONF = cfg.CONF def validate_key_repository(requires_write=False): # in as None because we don't set allow_no_values to True. is_valid = (os.access(CONF.fernet_tokens.key_repository, os.R_OK) and os.access(CONF.fernet_tokens.key_repository, os.X_OK)) if requires_write: is_valid = (is_valid and os.access(CONF.fernet_tokens.key_repository, os.W_OK)) if not is_valid: LOG.error( _LE('Either [fernet_tokens] key_repository does not exist or ' 'Keystone does not have sufficient permission to access it: ' '%s'), CONF.fernet_tokens.key_repository) else: stat_info = os.stat(CONF.fernet_tokens.key_repository) if(stat_info.st_mode & stat.S_IROTH or stat_info.st_mode & stat.S_IXOTH): LOG.warning(_LW( '[fernet_tokens] key_repository is world readable: %s'), CONF.fernet_tokens.key_repository) return is_valid def _convert_to_integers(id_value): # NOTE(lbragstad) os.chown() will raise a TypeError here if # keystone_user_id and keystone_group_id are not integers. Let's # values into the fernet_setup utility. try: id_int = int(id_value) except ValueError as e: msg = _LE('Unable to convert Keystone user or group ID. 
Error: %s') LOG.error(msg, e) raise return id_int def create_key_directory(keystone_user_id=None, keystone_group_id=None): if not os.access(CONF.fernet_tokens.key_repository, os.F_OK): LOG.info(_LI( '[fernet_tokens] key_repository does not appear to exist; ' 'attempting to create it')) try: os.makedirs(CONF.fernet_tokens.key_repository, 0o700) except OSError: LOG.error(_LE( 'Failed to create [fernet_tokens] key_repository: either it ' 'already exists or you don\'t have sufficient permissions to ' 'create it')) if keystone_user_id and keystone_group_id: os.chown( CONF.fernet_tokens.key_repository, keystone_user_id, keystone_group_id) elif keystone_user_id or keystone_group_id: LOG.warning(_LW( 'Unable to change the ownership of [fernet_tokens] ' 'key_repository without a keystone user ID and keystone group ' 'ID both being provided: %s') % CONF.fernet_tokens.key_repository) def _create_new_key(keystone_user_id, keystone_group_id): key = fernet.Fernet.generate_key() old_umask = os.umask(0o177) if keystone_user_id and keystone_group_id: old_egid = os.getegid() old_euid = os.geteuid() os.setegid(keystone_group_id) os.seteuid(keystone_user_id) elif keystone_user_id or keystone_group_id: LOG.warning(_LW( 'Unable to change the ownership of the new key without a keystone ' 'user ID and keystone group ID both being provided: %s') % CONF.fernet_tokens.key_repository) key_file = os.path.join(CONF.fernet_tokens.key_repository, '0') try: with open(key_file, 'w') as f: f.write(key.decode('utf-8')) finally: # the same with group and user identifiers if a Keystone group or user # was supplied. 
os.umask(old_umask) if keystone_user_id and keystone_group_id: os.seteuid(old_euid) os.setegid(old_egid) LOG.info(_LI('Created a new key: %s'), key_file) def initialize_key_repository(keystone_user_id=None, keystone_group_id=None): # make sure we have work to do before proceeding if os.access(os.path.join(CONF.fernet_tokens.key_repository, '0'), os.F_OK): LOG.info(_LI('Key repository is already initialized; aborting.')) return # bootstrap an existing key _create_new_key(keystone_user_id, keystone_group_id) # ensure that we end up with a primary and secondary key rotate_keys(keystone_user_id, keystone_group_id) def rotate_keys(keystone_user_id=None, keystone_group_id=None): # read the list of key files key_files = dict() for filename in os.listdir(CONF.fernet_tokens.key_repository): path = os.path.join(CONF.fernet_tokens.key_repository, str(filename)) if os.path.isfile(path): try: key_id = int(filename) except ValueError: # nosec : name isn't a number, ignore the file. pass else: key_files[key_id] = path LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), { 'count': len(key_files), 'list': list(key_files.values())}) current_primary_key = max(key_files.keys()) LOG.info(_LI('Current primary key is: %s'), current_primary_key) new_primary_key = current_primary_key + 1 LOG.info(_LI('Next primary key will be: %s'), new_primary_key) os.rename( os.path.join(CONF.fernet_tokens.key_repository, '0'), os.path.join(CONF.fernet_tokens.key_repository, str(new_primary_key))) key_files.pop(0) key_files[new_primary_key] = os.path.join( CONF.fernet_tokens.key_repository, str(new_primary_key)) LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key) _create_new_key(keystone_user_id, keystone_group_id) max_active_keys = CONF.fernet_tokens.max_active_keys if max_active_keys < 1: LOG.warning(_LW( '[fernet_tokens] max_active_keys must be at least 1 to maintain a ' 'primary key.')) max_active_keys = 1 # only the old active keys. 
keys = sorted(key_files.keys(), reverse=True) while len(keys) > (max_active_keys - 1): index_to_purge = keys.pop() key_to_purge = key_files[index_to_purge] LOG.info(_LI('Excess key to purge: %s'), key_to_purge) os.remove(key_to_purge) def load_keys(): if not validate_key_repository(): return [] # build a dictionary of key_number:encryption_key pairs keys = dict() for filename in os.listdir(CONF.fernet_tokens.key_repository): path = os.path.join(CONF.fernet_tokens.key_repository, str(filename)) if os.path.isfile(path): with open(path, 'r') as key_file: try: key_id = int(filename) except ValueError: # nosec : filename isn't a number, ignore pass else: keys[key_id] = key_file.read() if len(keys) != CONF.fernet_tokens.max_active_keys: # If there haven't been enough key rotations to reach max_active_keys, LOG.info(_LI( 'Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: ' '%(dir)s'), { 'count': len(keys), 'max': CONF.fernet_tokens.max_active_keys, 'dir': CONF.fernet_tokens.key_repository}) return [keys[x] for x in sorted(keys.keys(), reverse=True)]
true
true
1c35532eedb22971424928ef62d8037c39a834ae
4,061
py
Python
test/circuit/test_european_call_pricing_objective.py
SooluThomas/qiskit-finance
3e25551b55cdfebfeba1b3889a1cb930b83e57e3
[ "Apache-2.0" ]
1
2021-08-04T14:36:22.000Z
2021-08-04T14:36:22.000Z
test/circuit/test_european_call_pricing_objective.py
SooluThomas/qiskit-finance
3e25551b55cdfebfeba1b3889a1cb930b83e57e3
[ "Apache-2.0" ]
null
null
null
test/circuit/test_european_call_pricing_objective.py
SooluThomas/qiskit-finance
3e25551b55cdfebfeba1b3889a1cb930b83e57e3
[ "Apache-2.0" ]
null
null
null
# This code is part of Qiskit. # # (C) Copyright IBM 2020, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Test EuropeanCallPricingObjective""" import unittest from test import QiskitFinanceTestCase import numpy as np from qiskit.utils import algorithm_globals, QuantumInstance from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem from qiskit.circuit.library import LinearAmplitudeFunction, TwoLocal from qiskit.quantum_info import Operator from qiskit_finance.circuit.library import EuropeanCallPricingObjective, NormalDistribution class TestEuropeanCallExpectedValue(QiskitFinanceTestCase): """Tests EuropeanCallPricingObjective.""" def setUp(self): super().setUp() self.seed = 457 algorithm_globals.random_seed = self.seed def test_ecev_circuit(self): """Test the expected circuit. If it equals the correct ``LinearAmplitudeFunction`` we know the circuit is correct. 
""" num_qubits = 3 rescaling_factor = 0.1 strike_price = 0.5 bounds = (0, 2) ecev = EuropeanCallPricingObjective(num_qubits, strike_price, rescaling_factor, bounds) breakpoints = [0, strike_price] slopes = [0, 1] offsets = [0, 0] image = (0, 2 - strike_price) domain = (0, 2) linear_function = LinearAmplitudeFunction( num_qubits, slopes, offsets, domain=domain, image=image, breakpoints=breakpoints, rescaling_factor=rescaling_factor, ) self.assertTrue(Operator(ecev).equiv(linear_function)) def test_application(self): """Test an end-to-end application.""" try: from qiskit import ( Aer, ) # pylint: disable=unused-import,import-outside-toplevel except ImportError as ex: # pylint: disable=broad-except self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex))) return bounds = np.array([0.0, 7.0]) num_qubits = 3 # the distribution circuit is a normal distribution plus a QGAN-trained ansatz circuit dist = NormalDistribution(num_qubits, mu=1, sigma=1, bounds=bounds) ansatz = TwoLocal(num_qubits, "ry", "cz", reps=1, entanglement="circular") trained_params = [ 0.29399714, 0.38853322, 0.9557694, 0.07245791, 6.02626428, 0.13537225, ] ansatz.assign_parameters(trained_params, inplace=True) dist.compose(ansatz, inplace=True) # create the European call expected value strike_price = 2 rescaling_factor = 0.25 european_call = EuropeanCallPricingObjective( num_state_qubits=num_qubits, strike_price=strike_price, rescaling_factor=rescaling_factor, bounds=bounds, ) # create the state preparation circuit state_preparation = european_call.compose(dist, front=True) problem = EstimationProblem( state_preparation=state_preparation, objective_qubits=[num_qubits], post_processing=european_call.post_processing, ) q_i = QuantumInstance( Aer.get_backend("aer_simulator"), seed_simulator=125, seed_transpiler=80 ) iae = IterativeAmplitudeEstimation(epsilon_target=0.01, alpha=0.05, quantum_instance=q_i) result = iae.estimate(problem) 
self.assertAlmostEqual(result.estimation_processed, 1.0127253837345427) if __name__ == "__main__": unittest.main()
33.561983
97
0.657474
import unittest from test import QiskitFinanceTestCase import numpy as np from qiskit.utils import algorithm_globals, QuantumInstance from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem from qiskit.circuit.library import LinearAmplitudeFunction, TwoLocal from qiskit.quantum_info import Operator from qiskit_finance.circuit.library import EuropeanCallPricingObjective, NormalDistribution class TestEuropeanCallExpectedValue(QiskitFinanceTestCase): def setUp(self): super().setUp() self.seed = 457 algorithm_globals.random_seed = self.seed def test_ecev_circuit(self): num_qubits = 3 rescaling_factor = 0.1 strike_price = 0.5 bounds = (0, 2) ecev = EuropeanCallPricingObjective(num_qubits, strike_price, rescaling_factor, bounds) breakpoints = [0, strike_price] slopes = [0, 1] offsets = [0, 0] image = (0, 2 - strike_price) domain = (0, 2) linear_function = LinearAmplitudeFunction( num_qubits, slopes, offsets, domain=domain, image=image, breakpoints=breakpoints, rescaling_factor=rescaling_factor, ) self.assertTrue(Operator(ecev).equiv(linear_function)) def test_application(self): try: from qiskit import ( Aer, ) except ImportError as ex: self.skipTest("Aer doesn't appear to be installed. 
Error: '{}'".format(str(ex))) return bounds = np.array([0.0, 7.0]) num_qubits = 3 # the distribution circuit is a normal distribution plus a QGAN-trained ansatz circuit dist = NormalDistribution(num_qubits, mu=1, sigma=1, bounds=bounds) ansatz = TwoLocal(num_qubits, "ry", "cz", reps=1, entanglement="circular") trained_params = [ 0.29399714, 0.38853322, 0.9557694, 0.07245791, 6.02626428, 0.13537225, ] ansatz.assign_parameters(trained_params, inplace=True) dist.compose(ansatz, inplace=True) # create the European call expected value strike_price = 2 rescaling_factor = 0.25 european_call = EuropeanCallPricingObjective( num_state_qubits=num_qubits, strike_price=strike_price, rescaling_factor=rescaling_factor, bounds=bounds, ) # create the state preparation circuit state_preparation = european_call.compose(dist, front=True) problem = EstimationProblem( state_preparation=state_preparation, objective_qubits=[num_qubits], post_processing=european_call.post_processing, ) q_i = QuantumInstance( Aer.get_backend("aer_simulator"), seed_simulator=125, seed_transpiler=80 ) iae = IterativeAmplitudeEstimation(epsilon_target=0.01, alpha=0.05, quantum_instance=q_i) result = iae.estimate(problem) self.assertAlmostEqual(result.estimation_processed, 1.0127253837345427) if __name__ == "__main__": unittest.main()
true
true
1c3553a19ff3ad671fcce5a890853439829c43a3
184
py
Python
Job-Interviews/Python/Bitwise/Problem1.py
JuanPabloMontoya271/ITC
f5899e7a8fed4e9f90e97bb3af635a276d9cf13a
[ "MIT" ]
1
2020-11-02T15:18:16.000Z
2020-11-02T15:18:16.000Z
Job-Interviews/Python/Bitwise/Problem1.py
JuanPabloMontoya271/ITC
f5899e7a8fed4e9f90e97bb3af635a276d9cf13a
[ "MIT" ]
null
null
null
Job-Interviews/Python/Bitwise/Problem1.py
JuanPabloMontoya271/ITC
f5899e7a8fed4e9f90e97bb3af635a276d9cf13a
[ "MIT" ]
1
2021-10-30T14:18:29.000Z
2021-10-30T14:18:29.000Z
#Count number of 1 bits in a number def count(n): counter = 0 while n: counter+= n&1 n>>=1 return counter count(10) print(bin(10>>2), bin(10))
15.333333
36
0.527174
def count(n): counter = 0 while n: counter+= n&1 n>>=1 return counter count(10) print(bin(10>>2), bin(10))
true
true
1c3554bbe05bcb3a6f3a5c0a1b58842c1085fb45
181
py
Python
config/views.py
joway/python-china
cb3a61ff335c887d2ec65e5948647e056f652eac
[ "MIT" ]
null
null
null
config/views.py
joway/python-china
cb3a61ff335c887d2ec65e5948647e056f652eac
[ "MIT" ]
null
null
null
config/views.py
joway/python-china
cb3a61ff335c887d2ec65e5948647e056f652eac
[ "MIT" ]
null
null
null
from django.shortcuts import render def home(request): return render(request, 'index.html', locals()) def topic(request): return render(request, 'topic.html', locals())
18.1
50
0.707182
from django.shortcuts import render def home(request): return render(request, 'index.html', locals()) def topic(request): return render(request, 'topic.html', locals())
true
true
1c3554f915c34409018ac1e4a215ca1f4cbbbd61
314
py
Python
Scripts/core/native/__init__.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
Scripts/core/native/__init__.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
Scripts/core/native/__init__.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
# uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Core\native\__init__.py # Compiled at: 2014-12-16 01:27:05 # Size of source mod 2**32: 114 bytes pass
44.857143
107
0.719745
pass
true
true
1c3555912590c942e48bfd569e8e2f6c17ba0cf1
23,759
py
Python
dpaygo/storage.py
dpays/dpaygo
d0473799e8e9432a618736a281146d23eb433bf4
[ "MIT" ]
null
null
null
dpaygo/storage.py
dpays/dpaygo
d0473799e8e9432a618736a281146d23eb433bf4
[ "MIT" ]
null
null
null
dpaygo/storage.py
dpays/dpaygo
d0473799e8e9432a618736a281146d23eb433bf4
[ "MIT" ]
null
null
null
# This Python file uses the following encoding: utf-8 from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import bytes from builtins import object from dpaygographenebase.py23 import py23_bytes, bytes_types import shutil import time import os import sqlite3 from .aes import AESCipher from appdirs import user_data_dir from datetime import datetime import logging from binascii import hexlify import random import hashlib from .exceptions import WrongMasterPasswordException, NoWriteAccess from .nodelist import NodeList log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) log.addHandler(logging.StreamHandler()) timeformat = "%Y%m%d-%H%M%S" class DataDir(object): """ This class ensures that the user's data is stored in its OS preotected user directory: **OSX:** * `~/Library/Application Support/<AppName>` **Windows:** * `C:\\Documents and Settings\\<User>\\Application Data\\Local Settings\\<AppAuthor>\\<AppName>` * `C:\\Documents and Settings\\<User>\\Application Data\\<AppAuthor>\\<AppName>` **Linux:** * `~/.local/share/<AppName>` Furthermore, it offers an interface to generated backups in the `backups/` directory every now and then. 
""" appname = "dpaygo" appauthor = "dpaygo" storageDatabase = "dpaygo.sqlite" data_dir = user_data_dir(appname, appauthor) sqlDataBaseFile = os.path.join(data_dir, storageDatabase) def __init__(self): #: Storage self.mkdir_p() def mkdir_p(self): """ Ensure that the directory in which the data is stored exists """ if os.path.isdir(self.data_dir): return else: try: os.makedirs(self.data_dir) except FileExistsError: self.sqlDataBaseFile = ":memory:" return except OSError: self.sqlDataBaseFile = ":memory:" return def sqlite3_backup(self, backupdir): """ Create timestamped database copy """ if self.sqlDataBaseFile == ":memory:": return if not os.path.isdir(backupdir): os.mkdir(backupdir) backup_file = os.path.join( backupdir, os.path.basename(self.storageDatabase) + datetime.utcnow().strftime("-" + timeformat)) self.sqlite3_copy(self.sqlDataBaseFile, backup_file) configStorage["lastBackup"] = datetime.utcnow().strftime(timeformat) def sqlite3_copy(self, src, dst): """Copy sql file from src to dst""" if self.sqlDataBaseFile == ":memory:": return if not os.path.isfile(src): return connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() # Lock database before making a backup cursor.execute('begin immediate') # Make new backup file shutil.copyfile(src, dst) log.info("Creating {}...".format(dst)) # Unlock database connection.rollback() def recover_with_latest_backup(self, backupdir="backups"): """ Replace database with latest backup""" file_date = 0 if self.sqlDataBaseFile == ":memory:": return if not os.path.isdir(backupdir): backupdir = os.path.join(self.data_dir, backupdir) if not os.path.isdir(backupdir): return newest_backup_file = None for filename in os.listdir(backupdir): backup_file = os.path.join(backupdir, filename) if os.stat(backup_file).st_ctime > file_date: if os.path.isfile(backup_file): file_date = os.stat(backup_file).st_ctime newest_backup_file = backup_file if newest_backup_file is not None: self.sqlite3_copy(newest_backup_file, 
self.sqlDataBaseFile) def clean_data(self): """ Delete files older than 70 days """ if self.sqlDataBaseFile == ":memory:": return log.info("Cleaning up old backups") for filename in os.listdir(self.data_dir): backup_file = os.path.join(self.data_dir, filename) if os.stat(backup_file).st_ctime < (time.time() - 70 * 86400): if os.path.isfile(backup_file): os.remove(backup_file) log.info("Deleting {}...".format(backup_file)) def refreshBackup(self): """ Make a new backup """ backupdir = os.path.join(self.data_dir, "backups") self.sqlite3_backup(backupdir) self.clean_data() class Key(DataDir): """ This is the key storage that stores the public key and the (possibly encrypted) private key in the `keys` table in the SQLite3 database. """ __tablename__ = 'keys' def __init__(self): super(Key, self).__init__() def exists_table(self): """ Check if the database table exists """ query = ("SELECT name FROM sqlite_master " "WHERE type='table' AND name=?", (self.__tablename__, )) try: connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: self.sqlDataBaseFile = ":memory:" log.warning("Could not read(database: %s)" % (self.sqlDataBaseFile)) return True def create_table(self): """ Create the new table in the SQLite database """ query = ("CREATE TABLE {0} (" "id INTEGER PRIMARY KEY AUTOINCREMENT," "pub STRING(256)," "wif STRING(256))".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() def getPublicKeys(self): """ Returns the public keys stored in the database """ query = ("SELECT pub from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(query) results = cursor.fetchall() return [x[0] for x in results] except sqlite3.OperationalError: return [] def getPrivateKeyForPublicKey(self, 
pub): """ Returns the (possibly encrypted) private key that corresponds to a public key :param str pub: Public key The encryption scheme is BIP38 """ query = ("SELECT wif from {0} WHERE pub=?".format(self.__tablename__), (pub,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) key = cursor.fetchone() if key: return key[0] else: return None def updateWif(self, pub, wif): """ Change the wif to a pubkey :param str pub: Public key :param str wif: Private key """ query = ("UPDATE {0} SET wif=? WHERE pub=?".format(self.__tablename__), (wif, pub)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def add(self, wif, pub): """ Add a new public/private key pair (correspondence has to be checked elsewhere!) :param str pub: Public key :param str wif: Private key """ if self.getPrivateKeyForPublicKey(pub): raise ValueError("Key already in storage") query = ("INSERT INTO {0} (pub, wif) VALUES (?, ?)".format(self.__tablename__), (pub, wif)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def delete(self, pub): """ Delete the key identified as `pub` :param str pub: Public key """ query = ("DELETE FROM {0} WHERE pub=?".format(self.__tablename__), (pub,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def wipe(self, sure=False): """Purge the entire wallet. No keys will survive this!""" if not sure: log.error( "You need to confirm that you are sure " "and understand the implications of " "wiping your wallet!" 
) return else: query = ("DELETE FROM {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() class Token(DataDir): """ This is the token storage that stores the public username and the (possibly encrypted) token in the `token` table in the SQLite3 database. """ __tablename__ = 'token' def __init__(self): super(Token, self).__init__() def exists_table(self): """ Check if the database table exists """ query = ("SELECT name FROM sqlite_master " "WHERE type='table' AND name=?", (self.__tablename__, )) try: connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: self.sqlDataBaseFile = ":memory:" log.warning("Could not read(database: %s)" % (self.sqlDataBaseFile)) return True def create_table(self): """ Create the new table in the SQLite database """ query = ("CREATE TABLE {0} (" "id INTEGER PRIMARY KEY AUTOINCREMENT," "name STRING(256)," "token STRING(256))".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() def getPublicNames(self): """ Returns the public names stored in the database """ query = ("SELECT name from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(query) results = cursor.fetchall() return [x[0] for x in results] except sqlite3.OperationalError: return [] def getTokenForPublicName(self, name): """ Returns the (possibly encrypted) private token that corresponds to a public name :param str pub: Public name The encryption scheme is BIP38 """ query = ("SELECT token from {0} WHERE name=?".format(self.__tablename__), (name,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) token = cursor.fetchone() if 
token: return token[0] else: return None def updateToken(self, name, token): """ Change the token to a name :param str name: Public name :param str token: Private token """ query = ("UPDATE {0} SET token=? WHERE name=?".format(self.__tablename__), (token, name)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def add(self, name, token): """ Add a new public/private token pair (correspondence has to be checked elsewhere!) :param str name: Public name :param str token: Private token """ if self.getTokenForPublicName(name): raise ValueError("Key already in storage") query = ("INSERT INTO {0} (name, token) VALUES (?, ?)".format(self.__tablename__), (name, token)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def delete(self, name): """ Delete the key identified as `name` :param str name: Public name """ query = ("DELETE FROM {0} WHERE name=?".format(self.__tablename__), (name,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def wipe(self, sure=False): """Purge the entire wallet. No keys will survive this!""" if not sure: log.error( "You need to confirm that you are sure " "and understand the implications of " "wiping your wallet!" ) return else: query = ("DELETE FROM {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() class Configuration(DataDir): """ This is the configuration storage that stores key/value pairs in the `config` table of the SQLite3 database. 
""" __tablename__ = "config" #: Default configuration nodelist = NodeList() nodes = nodelist.get_nodes(normal=True, appbase=True, dev=False, testnet=False) config_defaults = { "node": nodes, "password_storage": "environment", "rpcpassword": "", "rpcuser": "", "order-expiration": 7 * 24 * 60 * 60, "client_id": "", "hot_sign_redirect_uri": None, "dpid_api_url": "https://dpayid.io/api/", "oauth_base_url": "https://dpayid.io/oauth2/"} def __init__(self): super(Configuration, self).__init__() def exists_table(self): """ Check if the database table exists """ query = ("SELECT name FROM sqlite_master " "WHERE type='table' AND name=?", (self.__tablename__,)) try: connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: self.sqlDataBaseFile = ":memory:" log.warning("Could not read(database: %s)" % (self.sqlDataBaseFile)) return True def create_table(self): """ Create the new table in the SQLite database """ query = ("CREATE TABLE {0} (" "id INTEGER PRIMARY KEY AUTOINCREMENT," "key STRING(256)," "value STRING(256))".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(query) connection.commit() except sqlite3.OperationalError: log.error("Could not write to database: %s" % (self.__tablename__)) raise NoWriteAccess("Could not write to database: %s" % (self.__tablename__)) def checkBackup(self): """ Backup the SQL database every 7 days """ if ("lastBackup" not in configStorage or configStorage["lastBackup"] == ""): print("No backup has been created yet!") self.refreshBackup() try: if ( datetime.utcnow() - datetime.strptime(configStorage["lastBackup"], timeformat) ).days > 7: print("Backups older than 7 days!") self.refreshBackup() except: self.refreshBackup() def _haveKey(self, key): """ Is the key `key` available int he configuration? 
""" query = ("SELECT value FROM {0} WHERE key=?".format(self.__tablename__), (key,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: log.warning("Could not read %s (database: %s)" % (str(key), self.__tablename__)) return False def __getitem__(self, key): """ This method behaves differently from regular `dict` in that it returns `None` if a key is not found! """ query = ("SELECT value FROM {0} WHERE key=?".format(self.__tablename__), (key,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) result = cursor.fetchone() if result: return result[0] else: if key in self.config_defaults: return self.config_defaults[key] else: return None except sqlite3.OperationalError: log.warning("Could not read %s (database: %s)" % (str(key), self.__tablename__)) if key in self.config_defaults: return self.config_defaults[key] else: return None def get(self, key, default=None): """ Return the key if exists or a default value """ if key in self: return self.__getitem__(key) else: return default def __contains__(self, key): if self._haveKey(key) or key in self.config_defaults: return True else: return False def __setitem__(self, key, value): if self._haveKey(key): query = ("UPDATE {0} SET value=? 
WHERE key=?".format(self.__tablename__), (value, key)) else: query = ("INSERT INTO {0} (key, value) VALUES (?, ?)".format(self.__tablename__), (key, value)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) connection.commit() except sqlite3.OperationalError: log.error("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) raise NoWriteAccess("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) def delete(self, key): """ Delete a key from the configuration store """ query = ("DELETE FROM {0} WHERE key=?".format(self.__tablename__), (key,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) connection.commit() except sqlite3.OperationalError: log.error("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) raise NoWriteAccess("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) def __iter__(self): return iter(list(self.items())) def items(self): query = ("SELECT key, value from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) r = {} for key, value in cursor.fetchall(): r[key] = value return r def __len__(self): query = ("SELECT id from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) return len(cursor.fetchall()) class MasterPassword(object): """ The keys are encrypted with a Masterpassword that is stored in the configurationStore. 
It has a checksum to verify correctness of the password """ password = "" # nosec decrypted_master = "" #: This key identifies the encrypted master password stored in the confiration config_key = "encrypted_master_password" def __init__(self, password): """ The encrypted private keys in `keys` are encrypted with a random encrypted masterpassword that is stored in the configuration. The password is used to encrypt this masterpassword. To decrypt the keys stored in the keys database, one must use BIP38, decrypt the masterpassword from the configuration store with the user password, and use the decrypted masterpassword to decrypt the BIP38 encrypted private keys from the keys storage! :param str password: Password to use for en-/de-cryption """ self.password = password if self.config_key not in configStorage: self.newMaster() self.saveEncrytpedMaster() else: self.decryptEncryptedMaster() def decryptEncryptedMaster(self): """ Decrypt the encrypted masterpassword """ aes = AESCipher(self.password) checksum, encrypted_master = configStorage[self.config_key].split("$") try: decrypted_master = aes.decrypt(encrypted_master) except: raise WrongMasterPasswordException if checksum != self.deriveChecksum(decrypted_master): raise WrongMasterPasswordException self.decrypted_master = decrypted_master def saveEncrytpedMaster(self): """ Store the encrypted master password in the configuration store """ configStorage[self.config_key] = self.getEncryptedMaster() def newMaster(self): """ Generate a new random masterpassword """ # make sure to not overwrite an existing key if (self.config_key in configStorage and configStorage[self.config_key]): return self.decrypted_master = hexlify(os.urandom(32)).decode("ascii") def deriveChecksum(self, s): """ Derive the checksum """ checksum = hashlib.sha256(py23_bytes(s, "ascii")).hexdigest() return checksum[:4] def getEncryptedMaster(self): """ Obtain the encrypted masterkey """ if not self.decrypted_master: raise Exception("master not 
decrypted") aes = AESCipher(self.password) return "{}${}".format(self.deriveChecksum(self.decrypted_master), aes.encrypt(self.decrypted_master)) def changePassword(self, newpassword): """ Change the password """ self.password = newpassword self.saveEncrytpedMaster() @staticmethod def wipe(sure=False): """Remove all keys from configStorage""" if not sure: log.error( "You need to confirm that you are sure " "and understand the implications of " "wiping your wallet!" ) return else: configStorage.delete(MasterPassword.config_key) # Create keyStorage keyStorage = Key() tokenStorage = Token() configStorage = Configuration() # Create Tables if database is brand new if not configStorage.exists_table(): configStorage.create_table() newKeyStorage = False if not keyStorage.exists_table(): newKeyStorage = True keyStorage.create_table() newTokenStorage = False if not tokenStorage.exists_table(): newTokenStorage = True tokenStorage.create_table()
35.094535
107
0.593038
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import bytes from builtins import object from dpaygographenebase.py23 import py23_bytes, bytes_types import shutil import time import os import sqlite3 from .aes import AESCipher from appdirs import user_data_dir from datetime import datetime import logging from binascii import hexlify import random import hashlib from .exceptions import WrongMasterPasswordException, NoWriteAccess from .nodelist import NodeList log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) log.addHandler(logging.StreamHandler()) timeformat = "%Y%m%d-%H%M%S" class DataDir(object): appname = "dpaygo" appauthor = "dpaygo" storageDatabase = "dpaygo.sqlite" data_dir = user_data_dir(appname, appauthor) sqlDataBaseFile = os.path.join(data_dir, storageDatabase) def __init__(self): self.mkdir_p() def mkdir_p(self): if os.path.isdir(self.data_dir): return else: try: os.makedirs(self.data_dir) except FileExistsError: self.sqlDataBaseFile = ":memory:" return except OSError: self.sqlDataBaseFile = ":memory:" return def sqlite3_backup(self, backupdir): if self.sqlDataBaseFile == ":memory:": return if not os.path.isdir(backupdir): os.mkdir(backupdir) backup_file = os.path.join( backupdir, os.path.basename(self.storageDatabase) + datetime.utcnow().strftime("-" + timeformat)) self.sqlite3_copy(self.sqlDataBaseFile, backup_file) configStorage["lastBackup"] = datetime.utcnow().strftime(timeformat) def sqlite3_copy(self, src, dst): if self.sqlDataBaseFile == ":memory:": return if not os.path.isfile(src): return connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute('begin immediate') shutil.copyfile(src, dst) log.info("Creating {}...".format(dst)) connection.rollback() def recover_with_latest_backup(self, backupdir="backups"): file_date = 0 if self.sqlDataBaseFile == ":memory:": return if not 
os.path.isdir(backupdir): backupdir = os.path.join(self.data_dir, backupdir) if not os.path.isdir(backupdir): return newest_backup_file = None for filename in os.listdir(backupdir): backup_file = os.path.join(backupdir, filename) if os.stat(backup_file).st_ctime > file_date: if os.path.isfile(backup_file): file_date = os.stat(backup_file).st_ctime newest_backup_file = backup_file if newest_backup_file is not None: self.sqlite3_copy(newest_backup_file, self.sqlDataBaseFile) def clean_data(self): if self.sqlDataBaseFile == ":memory:": return log.info("Cleaning up old backups") for filename in os.listdir(self.data_dir): backup_file = os.path.join(self.data_dir, filename) if os.stat(backup_file).st_ctime < (time.time() - 70 * 86400): if os.path.isfile(backup_file): os.remove(backup_file) log.info("Deleting {}...".format(backup_file)) def refreshBackup(self): backupdir = os.path.join(self.data_dir, "backups") self.sqlite3_backup(backupdir) self.clean_data() class Key(DataDir): __tablename__ = 'keys' def __init__(self): super(Key, self).__init__() def exists_table(self): query = ("SELECT name FROM sqlite_master " "WHERE type='table' AND name=?", (self.__tablename__, )) try: connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: self.sqlDataBaseFile = ":memory:" log.warning("Could not read(database: %s)" % (self.sqlDataBaseFile)) return True def create_table(self): query = ("CREATE TABLE {0} (" "id INTEGER PRIMARY KEY AUTOINCREMENT," "pub STRING(256)," "wif STRING(256))".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() def getPublicKeys(self): query = ("SELECT pub from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(query) results = cursor.fetchall() return [x[0] 
for x in results] except sqlite3.OperationalError: return [] def getPrivateKeyForPublicKey(self, pub): query = ("SELECT wif from {0} WHERE pub=?".format(self.__tablename__), (pub,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) key = cursor.fetchone() if key: return key[0] else: return None def updateWif(self, pub, wif): query = ("UPDATE {0} SET wif=? WHERE pub=?".format(self.__tablename__), (wif, pub)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def add(self, wif, pub): if self.getPrivateKeyForPublicKey(pub): raise ValueError("Key already in storage") query = ("INSERT INTO {0} (pub, wif) VALUES (?, ?)".format(self.__tablename__), (pub, wif)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def delete(self, pub): query = ("DELETE FROM {0} WHERE pub=?".format(self.__tablename__), (pub,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def wipe(self, sure=False): if not sure: log.error( "You need to confirm that you are sure " "and understand the implications of " "wiping your wallet!" 
) return else: query = ("DELETE FROM {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() class Token(DataDir): __tablename__ = 'token' def __init__(self): super(Token, self).__init__() def exists_table(self): query = ("SELECT name FROM sqlite_master " "WHERE type='table' AND name=?", (self.__tablename__, )) try: connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: self.sqlDataBaseFile = ":memory:" log.warning("Could not read(database: %s)" % (self.sqlDataBaseFile)) return True def create_table(self): query = ("CREATE TABLE {0} (" "id INTEGER PRIMARY KEY AUTOINCREMENT," "name STRING(256)," "token STRING(256))".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() def getPublicNames(self): query = ("SELECT name from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(query) results = cursor.fetchall() return [x[0] for x in results] except sqlite3.OperationalError: return [] def getTokenForPublicName(self, name): query = ("SELECT token from {0} WHERE name=?".format(self.__tablename__), (name,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) token = cursor.fetchone() if token: return token[0] else: return None def updateToken(self, name, token): query = ("UPDATE {0} SET token=? 
WHERE name=?".format(self.__tablename__), (token, name)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def add(self, name, token): if self.getTokenForPublicName(name): raise ValueError("Key already in storage") query = ("INSERT INTO {0} (name, token) VALUES (?, ?)".format(self.__tablename__), (name, token)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def delete(self, name): query = ("DELETE FROM {0} WHERE name=?".format(self.__tablename__), (name,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) connection.commit() def wipe(self, sure=False): if not sure: log.error( "You need to confirm that you are sure " "and understand the implications of " "wiping your wallet!" ) return else: query = ("DELETE FROM {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) connection.commit() class Configuration(DataDir): __tablename__ = "config" nodelist = NodeList() nodes = nodelist.get_nodes(normal=True, appbase=True, dev=False, testnet=False) config_defaults = { "node": nodes, "password_storage": "environment", "rpcpassword": "", "rpcuser": "", "order-expiration": 7 * 24 * 60 * 60, "client_id": "", "hot_sign_redirect_uri": None, "dpid_api_url": "https://dpayid.io/api/", "oauth_base_url": "https://dpayid.io/oauth2/"} def __init__(self): super(Configuration, self).__init__() def exists_table(self): query = ("SELECT name FROM sqlite_master " "WHERE type='table' AND name=?", (self.__tablename__,)) try: connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: self.sqlDataBaseFile = ":memory:" log.warning("Could not read(database: %s)" % (self.sqlDataBaseFile)) return True def 
create_table(self): query = ("CREATE TABLE {0} (" "id INTEGER PRIMARY KEY AUTOINCREMENT," "key STRING(256)," "value STRING(256))".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(query) connection.commit() except sqlite3.OperationalError: log.error("Could not write to database: %s" % (self.__tablename__)) raise NoWriteAccess("Could not write to database: %s" % (self.__tablename__)) def checkBackup(self): if ("lastBackup" not in configStorage or configStorage["lastBackup"] == ""): print("No backup has been created yet!") self.refreshBackup() try: if ( datetime.utcnow() - datetime.strptime(configStorage["lastBackup"], timeformat) ).days > 7: print("Backups older than 7 days!") self.refreshBackup() except: self.refreshBackup() def _haveKey(self, key): query = ("SELECT value FROM {0} WHERE key=?".format(self.__tablename__), (key,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) return True if cursor.fetchone() else False except sqlite3.OperationalError: log.warning("Could not read %s (database: %s)" % (str(key), self.__tablename__)) return False def __getitem__(self, key): query = ("SELECT value FROM {0} WHERE key=?".format(self.__tablename__), (key,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) result = cursor.fetchone() if result: return result[0] else: if key in self.config_defaults: return self.config_defaults[key] else: return None except sqlite3.OperationalError: log.warning("Could not read %s (database: %s)" % (str(key), self.__tablename__)) if key in self.config_defaults: return self.config_defaults[key] else: return None def get(self, key, default=None): if key in self: return self.__getitem__(key) else: return default def __contains__(self, key): if self._haveKey(key) or key in self.config_defaults: return True else: return False def __setitem__(self, key, 
value): if self._haveKey(key): query = ("UPDATE {0} SET value=? WHERE key=?".format(self.__tablename__), (value, key)) else: query = ("INSERT INTO {0} (key, value) VALUES (?, ?)".format(self.__tablename__), (key, value)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) connection.commit() except sqlite3.OperationalError: log.error("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) raise NoWriteAccess("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) def delete(self, key): query = ("DELETE FROM {0} WHERE key=?".format(self.__tablename__), (key,)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() try: cursor.execute(*query) connection.commit() except sqlite3.OperationalError: log.error("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) raise NoWriteAccess("Could not write to %s (database: %s)" % (str(key), self.__tablename__)) def __iter__(self): return iter(list(self.items())) def items(self): query = ("SELECT key, value from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) r = {} for key, value in cursor.fetchall(): r[key] = value return r def __len__(self): query = ("SELECT id from {0} ".format(self.__tablename__)) connection = sqlite3.connect(self.sqlDataBaseFile) cursor = connection.cursor() cursor.execute(query) return len(cursor.fetchall()) class MasterPassword(object): password = "" decrypted_master = "" config_key = "encrypted_master_password" def __init__(self, password): self.password = password if self.config_key not in configStorage: self.newMaster() self.saveEncrytpedMaster() else: self.decryptEncryptedMaster() def decryptEncryptedMaster(self): aes = AESCipher(self.password) checksum, encrypted_master = configStorage[self.config_key].split("$") try: decrypted_master = aes.decrypt(encrypted_master) except: raise 
WrongMasterPasswordException if checksum != self.deriveChecksum(decrypted_master): raise WrongMasterPasswordException self.decrypted_master = decrypted_master def saveEncrytpedMaster(self): configStorage[self.config_key] = self.getEncryptedMaster() def newMaster(self): if (self.config_key in configStorage and configStorage[self.config_key]): return self.decrypted_master = hexlify(os.urandom(32)).decode("ascii") def deriveChecksum(self, s): checksum = hashlib.sha256(py23_bytes(s, "ascii")).hexdigest() return checksum[:4] def getEncryptedMaster(self): if not self.decrypted_master: raise Exception("master not decrypted") aes = AESCipher(self.password) return "{}${}".format(self.deriveChecksum(self.decrypted_master), aes.encrypt(self.decrypted_master)) def changePassword(self, newpassword): self.password = newpassword self.saveEncrytpedMaster() @staticmethod def wipe(sure=False): if not sure: log.error( "You need to confirm that you are sure " "and understand the implications of " "wiping your wallet!" ) return else: configStorage.delete(MasterPassword.config_key) keyStorage = Key() tokenStorage = Token() configStorage = Configuration() if not configStorage.exists_table(): configStorage.create_table() newKeyStorage = False if not keyStorage.exists_table(): newKeyStorage = True keyStorage.create_table() newTokenStorage = False if not tokenStorage.exists_table(): newTokenStorage = True tokenStorage.create_table()
true
true
1c3555f8149df493963d51cf18f6e2dc9ec2720b
2,273
py
Python
examples/django_app/example_app/views.py
maclogan/VirtualPenPal
6e95f48d876aea151cd445b3e36ad69be03d5780
[ "BSD-3-Clause" ]
null
null
null
examples/django_app/example_app/views.py
maclogan/VirtualPenPal
6e95f48d876aea151cd445b3e36ad69be03d5780
[ "BSD-3-Clause" ]
null
null
null
examples/django_app/example_app/views.py
maclogan/VirtualPenPal
6e95f48d876aea151cd445b3e36ad69be03d5780
[ "BSD-3-Clause" ]
null
null
null
from django.views.generic.base import TemplateView from django.shortcuts import render, redirect from django.contrib.auth import ( authenticate, login, logout, get_user_model ) from django.views import generic from django.views.generic import View from .forms import UserForm class ChatterBotAppView(TemplateView): template_name = "app.html" class UserFormView(View): form_class = UserForm template_name = 'user_registration.html' def get(self, request): form = self.form_class(None) return render(request, self.template_name, {'form': form}) def post(self, request): form = self.form_class(request.POST) if form.is_valid(): user = form.save(commit=False) username = form.cleaned_data['username'] password = form.cleaned_data['password'] user.set_password(password) user.save() user = authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user) return redirect('/') return render(request, self.template_name, {'form':form}) class AboutView(View): template_name = "about.html" def get(self, request): return render(request, self.template_name) # class UserLoginView(View): # form_class = UserLoginForm # template_name = 'login.html' # # def get(self, request): # form = self.form_class(None) # return render(request, self.template_name, {'form': form}) # # def post(self, request): # form = self.form_class(request.POST) # # if form.is_valid(): # username = form.cleaned_data['username'] # password = form.cleaned_data['password'] # user = authenticate(username=username, password=password) # if user is not None: # if user.is_active: # login(request, user) # return redirect('/') # else: # print ("not active") # else: # print ("None") # else: # print ("Form not Valid") # # return render(request, self.template_name, {'form':form})
31.136986
71
0.591289
from django.views.generic.base import TemplateView from django.shortcuts import render, redirect from django.contrib.auth import ( authenticate, login, logout, get_user_model ) from django.views import generic from django.views.generic import View from .forms import UserForm class ChatterBotAppView(TemplateView): template_name = "app.html" class UserFormView(View): form_class = UserForm template_name = 'user_registration.html' def get(self, request): form = self.form_class(None) return render(request, self.template_name, {'form': form}) def post(self, request): form = self.form_class(request.POST) if form.is_valid(): user = form.save(commit=False) username = form.cleaned_data['username'] password = form.cleaned_data['password'] user.set_password(password) user.save() user = authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user) return redirect('/') return render(request, self.template_name, {'form':form}) class AboutView(View): template_name = "about.html" def get(self, request): return render(request, self.template_name)
true
true
1c3556be885e65be6142efcd0b8f6d82f18ddbe3
6,004
py
Python
twitchio/ext/pubsub/models.py
tesence/TwitchIO
3362a916f5facb2ee9c271c663d229947ae69f0b
[ "MIT" ]
null
null
null
twitchio/ext/pubsub/models.py
tesence/TwitchIO
3362a916f5facb2ee9c271c663d229947ae69f0b
[ "MIT" ]
null
null
null
twitchio/ext/pubsub/models.py
tesence/TwitchIO
3362a916f5facb2ee9c271c663d229947ae69f0b
[ "MIT" ]
null
null
null
""" The MIT License (MIT) Copyright (c) 2017-2021 TwitchIO Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import datetime from typing import List, Optional from twitchio import PartialUser, Client, Channel, CustomReward __all__ = ( "PoolError", "PoolFull", "PubSubMessage", "PubSubBitsMessage", "PubSubBitsBadgeMessage", "PubSubChatMessage", "PubSubBadgeEntitlement", "PubSubChannelPointsMessage", "PubSubModerationAction", ) class PubSubError(Exception): pass class ConnectionFailure(PubSubError): pass class PoolError(PubSubError): pass class PoolFull(PoolError): pass class PubSubChatMessage: __slots__ = "content", "id", "type" def __init__(self, content: str, id: str, type: str): self.content = content self.id = int(id) self.type = type class PubSubBadgeEntitlement: __slots__ = "new", "old" def __init__(self, new: int, old: int): self.new = new self.old = old class PubSubMessage: __slots__ = "topic", "_data" def __init__(self, client: Client, topic: Optional[str], data: dict): self.topic = topic self._data = data class PubSubBitsMessage(PubSubMessage): __slots__ = "badge_entitlement", "bits_used", "channel_id", "context", "anonymous", "message", "user", "version" def __init__(self, client: Client, topic: str, data: dict): super().__init__(client, topic, data) self.message = PubSubChatMessage(data["chat_message"], data["message_id"], data["message_type"]) self.badge_entitlement = ( PubSubBadgeEntitlement(data["badge_entitlement"]["new_version"], data["badge_entitlement"]["old_version"]) if data["badge_entitlement"] else None ) self.bits_used: int = data["bits_used"] self.channel_id: int = int(data["channel_id"]) self.user = ( PartialUser(client._http, data["user_id"], data["user_name"]) if data["user_id"] is not None else None ) self.version: str = data["version"] class PubSubBitsBadgeMessage(PubSubMessage): __slots__ = "user", "channel", "badge_tier", "message", "timestamp" def __init__(self, client: Client, topic: str, data: dict): super().__init__(client, topic, data) self.user = PartialUser(client._http, data["user_id"], data["user_name"]) self.channel: Channel = 
client.get_channel(data["channel_name"]) or Channel( name=data["channel_name"], websocket=client._connection ) self.badge_tier: int = data["badge_tier"] self.message = data["chat_message"] self.timestamp = datetime.datetime.strptime(data["time"], "%Y-%m-%dT%H:%M:%SZ") class PubSubChannelPointsMessage(PubSubMessage): __slots__ = "timestamp", "channel_id", "user", "id", "reward", "input", "status" def __init__(self, client: Client, data: dict): super().__init__(client, None, data) self.timestamp = datetime.datetime.strptime(data["redemption"]["redeemed_at"], "%Y-%m-%dT%H:%M:%SZ") self.channel_id: int = int(data["redemption"]["channel_id"]) self.id: str = data["redemption"]["id"] self.user = PartialUser(client._http, data["user"]["id"], data["user"]["display_name"]) self.reward = CustomReward( client._http, data["redemption"]["reward"], PartialUser(client._http, self.channel_id, None) ) self.input: str = data["redemption"]["user_input"] self.status: str = data["redemption"]["status"] class PubSubModerationAction(PubSubMessage): __slots__ = "action", "args", "created_by", "message_id", "target", "from_automod" def __init__(self, client: Client, topic: str, data: dict): super().__init__(client, topic, data) self.action: str = data["message"]["data"]["moderation_action"] self.args: List[str] = data["message"]["data"]["args"] self.created_by = PartialUser( client._http, data["message"]["data"]["created_by_user_id"], data["message"]["data"]["created_by"] ) self.message_id: str = data["message"]["data"]["msg_id"] self.target = ( PartialUser( client._http, data["message"]["data"]["target_user_id"], data["message"]["data"]["target_user_login"] ) if data["message"]["data"]["target_user_id"] else None ) self.from_automod: bool = data["message"]["data"]["from_automod"] _mapping = { "channel-bits-events-v2": ("pubsub_bits", PubSubBitsMessage), "channel-bits-badge-unlocks": ("pubsub_bits_badge", PubSubBitsBadgeMessage), "channel-subscribe-events-v1": ("pubsub_subscription", None), 
"chat_moderator_actions": ("pubsub_moderation", PubSubModerationAction), "whispers": ("pubsub_whisper", None), } def create_message(client, msg: dict): topic = msg["data"]["topic"].split(".")[0] r = _mapping[topic] return r[0], r[1](client, topic, msg["data"])
34.113636
118
0.665057
import datetime from typing import List, Optional from twitchio import PartialUser, Client, Channel, CustomReward __all__ = ( "PoolError", "PoolFull", "PubSubMessage", "PubSubBitsMessage", "PubSubBitsBadgeMessage", "PubSubChatMessage", "PubSubBadgeEntitlement", "PubSubChannelPointsMessage", "PubSubModerationAction", ) class PubSubError(Exception): pass class ConnectionFailure(PubSubError): pass class PoolError(PubSubError): pass class PoolFull(PoolError): pass class PubSubChatMessage: __slots__ = "content", "id", "type" def __init__(self, content: str, id: str, type: str): self.content = content self.id = int(id) self.type = type class PubSubBadgeEntitlement: __slots__ = "new", "old" def __init__(self, new: int, old: int): self.new = new self.old = old class PubSubMessage: __slots__ = "topic", "_data" def __init__(self, client: Client, topic: Optional[str], data: dict): self.topic = topic self._data = data class PubSubBitsMessage(PubSubMessage): __slots__ = "badge_entitlement", "bits_used", "channel_id", "context", "anonymous", "message", "user", "version" def __init__(self, client: Client, topic: str, data: dict): super().__init__(client, topic, data) self.message = PubSubChatMessage(data["chat_message"], data["message_id"], data["message_type"]) self.badge_entitlement = ( PubSubBadgeEntitlement(data["badge_entitlement"]["new_version"], data["badge_entitlement"]["old_version"]) if data["badge_entitlement"] else None ) self.bits_used: int = data["bits_used"] self.channel_id: int = int(data["channel_id"]) self.user = ( PartialUser(client._http, data["user_id"], data["user_name"]) if data["user_id"] is not None else None ) self.version: str = data["version"] class PubSubBitsBadgeMessage(PubSubMessage): __slots__ = "user", "channel", "badge_tier", "message", "timestamp" def __init__(self, client: Client, topic: str, data: dict): super().__init__(client, topic, data) self.user = PartialUser(client._http, data["user_id"], data["user_name"]) self.channel: Channel = 
client.get_channel(data["channel_name"]) or Channel( name=data["channel_name"], websocket=client._connection ) self.badge_tier: int = data["badge_tier"] self.message = data["chat_message"] self.timestamp = datetime.datetime.strptime(data["time"], "%Y-%m-%dT%H:%M:%SZ") class PubSubChannelPointsMessage(PubSubMessage): __slots__ = "timestamp", "channel_id", "user", "id", "reward", "input", "status" def __init__(self, client: Client, data: dict): super().__init__(client, None, data) self.timestamp = datetime.datetime.strptime(data["redemption"]["redeemed_at"], "%Y-%m-%dT%H:%M:%SZ") self.channel_id: int = int(data["redemption"]["channel_id"]) self.id: str = data["redemption"]["id"] self.user = PartialUser(client._http, data["user"]["id"], data["user"]["display_name"]) self.reward = CustomReward( client._http, data["redemption"]["reward"], PartialUser(client._http, self.channel_id, None) ) self.input: str = data["redemption"]["user_input"] self.status: str = data["redemption"]["status"] class PubSubModerationAction(PubSubMessage): __slots__ = "action", "args", "created_by", "message_id", "target", "from_automod" def __init__(self, client: Client, topic: str, data: dict): super().__init__(client, topic, data) self.action: str = data["message"]["data"]["moderation_action"] self.args: List[str] = data["message"]["data"]["args"] self.created_by = PartialUser( client._http, data["message"]["data"]["created_by_user_id"], data["message"]["data"]["created_by"] ) self.message_id: str = data["message"]["data"]["msg_id"] self.target = ( PartialUser( client._http, data["message"]["data"]["target_user_id"], data["message"]["data"]["target_user_login"] ) if data["message"]["data"]["target_user_id"] else None ) self.from_automod: bool = data["message"]["data"]["from_automod"] _mapping = { "channel-bits-events-v2": ("pubsub_bits", PubSubBitsMessage), "channel-bits-badge-unlocks": ("pubsub_bits_badge", PubSubBitsBadgeMessage), "channel-subscribe-events-v1": ("pubsub_subscription", None), 
"chat_moderator_actions": ("pubsub_moderation", PubSubModerationAction), "whispers": ("pubsub_whisper", None), } def create_message(client, msg: dict): topic = msg["data"]["topic"].split(".")[0] r = _mapping[topic] return r[0], r[1](client, topic, msg["data"])
true
true
1c35570e864a58b65e4579108d82304f11faf769
12,093
py
Python
PyStacks/test/templates/test_rds.py
0xack13/PyStacks
13136c43089c241680beb216a233d1846119dd7c
[ "MIT" ]
11
2018-02-15T04:27:05.000Z
2020-10-02T11:20:08.000Z
PyStacks/test/templates/test_rds.py
0xack13/PyStacks
13136c43089c241680beb216a233d1846119dd7c
[ "MIT" ]
3
2018-02-15T05:46:54.000Z
2018-03-05T04:46:51.000Z
PyStacks/test/templates/test_rds.py
0xack13/PyStacks
13136c43089c241680beb216a233d1846119dd7c
[ "MIT" ]
8
2018-03-05T04:40:41.000Z
2021-02-22T08:07:58.000Z
import unittest from PyStacks.PyStacks.template import templateCF class TestTemplate(unittest.TestCase): def test_templateCF_RDSParameterGroup(self): resources = { 'rdsparamsgroup': { 'TestRDSParamGroup': { 'description': 'Some Description...', 'family': 'MySQL5.1', 'parameters': { 'testparamkey1': 'testparamval1', 'testparamkey2': 'testparamval2' }, 'tags': { 'testtagkey1': 'testtagval1', 'testtagkey2': 'testtagval2' } } } } expected = { 'TestRDSParamGroup': { 'Type': 'AWS::RDS::DBParameterGroup', 'Properties': { 'Description': 'Some Description...', 'Family': 'MySQL5.1', 'Parameters': { 'testparamkey1': 'testparamval1', 'testparamkey2': 'testparamval2' }, 'Tags': [ { 'Key': 'testtagkey1', 'Value': 'testtagval1' }, { 'Key': 'testtagkey2', 'Value': 'testtagval2' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) def test_templateCF_RDSSubnet(self): resources = { 'rdssubnet': { 'TestRDSSubnet': { 'description': 'Some Description...', 'subnets': [ 'testsubnet1', 'testsubnet2' ], 'tags': { 'testtagkey1': 'testtagval1', 'testtagkey2': 'testtagval2' } } } } expected = { 'TestRDSSubnet': { 'Type': 'AWS::RDS::DBSubnetGroup', 'Properties': { 'DBSubnetGroupDescription': 'Some Description...', 'SubnetIds': [ { "Fn::ImportValue": { "Fn::Sub": [ "${VPCStack}-Subnet-testsubnet1", { "VPCStack": { "Ref": "VPCStack" } } ] } }, { "Fn::ImportValue": { "Fn::Sub": [ "${VPCStack}-Subnet-testsubnet2", { "VPCStack": { "Ref": "VPCStack" } } ] } } ], 'Tags': [ { 'Key': 'testtagkey1', 'Value': 'testtagval1' }, { 'Key': 'testtagkey2', 'Value': 'testtagval2' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) def test_templateCF_RDS(self): resources = { 'rds': { 'SimpleRDS': { 'allocatedstorage': '100', 'allowmajorupgrade': True, 'allowminorupgrade': True, 'multiaz': True, 'databasename': 'SimpleRDSName', 'instanceclass': 'db.r3.large', 'rdssubnetgroup': 'rdsubnet', 'engine': 'postgres', 'engineversion': 
'9.4.7', 'port': '5432', 'backupwindow': '19:30-20:00', 'maintenancewindow': '21:30-22:00', 'storageencryption': 'False', 'storagetype': 'standard', 'secgroups': [ 'securityGroup1' ] }, 'ComplexRDS': { 'allocatedstorage': '200', 'allowmajorupgrade': False, 'allowminorupgrade': False, 'multiaz': False, 'az': 'b', 'backupretention': 7, 'databasename': 'SimpleRDSName', 'dbname': 'SimpleDBName', 'iops': 3000, 'kmskeyid': 'kmskeyidtest', 'DBSnapshotIdentifier': 'snapshotidtest', 'monitoringarn': 'monitoringarntest', 'monitoringinterval': 5, 'instanceclass': 'db.r3.large', 'rdssubnetgroup': 'rdsubnet', 'engine': 'postgres', 'engineversion': '9.4.7', 'port': '5432', 'backupwindow': '19:30-20:00', 'maintenancewindow': '21:30-22:00', 'storageencryption': False, 'storagetype': 'standard', 'secgroups': [ 'securityGroup1', 'securityGroup2' ], 'tags': { 'testtagkey1': 'testtagval1', 'testtagkey2': 'testtagval2' } } } } expected = { 'SimpleRDS': { 'Type': 'AWS::RDS::DBInstance', 'Properties': { 'AllocatedStorage': '100', 'AllowMajorVersionUpgrade': 'true', 'AutoMinorVersionUpgrade': 'true', 'MultiAZ': 'true', 'CopyTagsToSnapshot': 'true', 'DBInstanceIdentifier': 'SimpleRDSName', 'DBInstanceClass': 'db.r3.large', 'DBSubnetGroupName': { 'Fn::ImportValue': { 'Fn::Sub': [ '${RDSSubnetStack}-RDS-rdsubnet-Subnet', { 'RDSSubnetStack': { 'Ref': 'RDSSubnetStack' } } ] } }, 'Engine': 'postgres', 'EngineVersion': '9.4.7', 'MasterUserPassword': {'Ref': 'DBPassword'}, 'Port': '5432', 'PreferredBackupWindow': '19:30-20:00', 'PreferredMaintenanceWindow': '21:30-22:00', 'PubliclyAccessible': 'false', 'StorageEncrypted': 'false', 'StorageType': 'standard', 'VPCSecurityGroups': [ { 'Fn::ImportValue': { 'Fn::Sub': [ '${SecurityStack}-SecGroup-securityGroup1', { 'SecurityStack': { 'Ref': 'SecurityStack' } } ] } } ] } }, 'ComplexRDS': { 'Type': 'AWS::RDS::DBInstance', 'Properties': { 'AllocatedStorage': '200', 'AllowMajorVersionUpgrade': 'false', 'AutoMinorVersionUpgrade': 'false', 
'BackupRetentionPeriod': '7', 'MultiAZ': 'false', 'AvailabilityZone': { 'Fn::Join': [ '', [ { 'Ref': 'AWS::Region' }, 'b' ] ] }, 'CopyTagsToSnapshot': 'true', 'DBInstanceIdentifier': 'SimpleRDSName', 'DBInstanceClass': 'db.r3.large', 'DBName': 'SimpleDBName', 'DBSubnetGroupName': { 'Fn::ImportValue': { 'Fn::Sub': [ '${RDSSubnetStack}-RDS-rdsubnet-Subnet', { 'RDSSubnetStack': { 'Ref': 'RDSSubnetStack' } } ] } }, 'DBSnapshotIdentifier': 'snapshotidtest', 'Engine': 'postgres', 'EngineVersion': '9.4.7', 'Iops': '3000', 'KmsKeyId': 'kmskeyidtest', 'Port': '5432', 'PreferredBackupWindow': '19:30-20:00', 'PreferredMaintenanceWindow': '21:30-22:00', 'PubliclyAccessible': 'false', 'StorageEncrypted': 'false', 'StorageType': 'standard', 'VPCSecurityGroups': [ { 'Fn::ImportValue': { 'Fn::Sub': [ '${SecurityStack}-SecGroup-securityGroup1', { 'SecurityStack': { 'Ref': 'SecurityStack' } } ] } }, { 'Fn::ImportValue': { 'Fn::Sub': [ '${SecurityStack}-SecGroup-securityGroup2', { 'SecurityStack': { 'Ref': 'SecurityStack' } } ] } } ], 'Tags': [ { 'Key': 'testtagkey1', 'Value': 'testtagval1' }, { 'Key': 'testtagkey2', 'Value': 'testtagval2' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) if __name__ == '__main__': unittest.main()
38.512739
79
0.291987
import unittest from PyStacks.PyStacks.template import templateCF class TestTemplate(unittest.TestCase): def test_templateCF_RDSParameterGroup(self): resources = { 'rdsparamsgroup': { 'TestRDSParamGroup': { 'description': 'Some Description...', 'family': 'MySQL5.1', 'parameters': { 'testparamkey1': 'testparamval1', 'testparamkey2': 'testparamval2' }, 'tags': { 'testtagkey1': 'testtagval1', 'testtagkey2': 'testtagval2' } } } } expected = { 'TestRDSParamGroup': { 'Type': 'AWS::RDS::DBParameterGroup', 'Properties': { 'Description': 'Some Description...', 'Family': 'MySQL5.1', 'Parameters': { 'testparamkey1': 'testparamval1', 'testparamkey2': 'testparamval2' }, 'Tags': [ { 'Key': 'testtagkey1', 'Value': 'testtagval1' }, { 'Key': 'testtagkey2', 'Value': 'testtagval2' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) def test_templateCF_RDSSubnet(self): resources = { 'rdssubnet': { 'TestRDSSubnet': { 'description': 'Some Description...', 'subnets': [ 'testsubnet1', 'testsubnet2' ], 'tags': { 'testtagkey1': 'testtagval1', 'testtagkey2': 'testtagval2' } } } } expected = { 'TestRDSSubnet': { 'Type': 'AWS::RDS::DBSubnetGroup', 'Properties': { 'DBSubnetGroupDescription': 'Some Description...', 'SubnetIds': [ { "Fn::ImportValue": { "Fn::Sub": [ "${VPCStack}-Subnet-testsubnet1", { "VPCStack": { "Ref": "VPCStack" } } ] } }, { "Fn::ImportValue": { "Fn::Sub": [ "${VPCStack}-Subnet-testsubnet2", { "VPCStack": { "Ref": "VPCStack" } } ] } } ], 'Tags': [ { 'Key': 'testtagkey1', 'Value': 'testtagval1' }, { 'Key': 'testtagkey2', 'Value': 'testtagval2' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) def test_templateCF_RDS(self): resources = { 'rds': { 'SimpleRDS': { 'allocatedstorage': '100', 'allowmajorupgrade': True, 'allowminorupgrade': True, 'multiaz': True, 'databasename': 'SimpleRDSName', 'instanceclass': 'db.r3.large', 'rdssubnetgroup': 'rdsubnet', 'engine': 'postgres', 'engineversion': 
'9.4.7', 'port': '5432', 'backupwindow': '19:30-20:00', 'maintenancewindow': '21:30-22:00', 'storageencryption': 'False', 'storagetype': 'standard', 'secgroups': [ 'securityGroup1' ] }, 'ComplexRDS': { 'allocatedstorage': '200', 'allowmajorupgrade': False, 'allowminorupgrade': False, 'multiaz': False, 'az': 'b', 'backupretention': 7, 'databasename': 'SimpleRDSName', 'dbname': 'SimpleDBName', 'iops': 3000, 'kmskeyid': 'kmskeyidtest', 'DBSnapshotIdentifier': 'snapshotidtest', 'monitoringarn': 'monitoringarntest', 'monitoringinterval': 5, 'instanceclass': 'db.r3.large', 'rdssubnetgroup': 'rdsubnet', 'engine': 'postgres', 'engineversion': '9.4.7', 'port': '5432', 'backupwindow': '19:30-20:00', 'maintenancewindow': '21:30-22:00', 'storageencryption': False, 'storagetype': 'standard', 'secgroups': [ 'securityGroup1', 'securityGroup2' ], 'tags': { 'testtagkey1': 'testtagval1', 'testtagkey2': 'testtagval2' } } } } expected = { 'SimpleRDS': { 'Type': 'AWS::RDS::DBInstance', 'Properties': { 'AllocatedStorage': '100', 'AllowMajorVersionUpgrade': 'true', 'AutoMinorVersionUpgrade': 'true', 'MultiAZ': 'true', 'CopyTagsToSnapshot': 'true', 'DBInstanceIdentifier': 'SimpleRDSName', 'DBInstanceClass': 'db.r3.large', 'DBSubnetGroupName': { 'Fn::ImportValue': { 'Fn::Sub': [ '${RDSSubnetStack}-RDS-rdsubnet-Subnet', { 'RDSSubnetStack': { 'Ref': 'RDSSubnetStack' } } ] } }, 'Engine': 'postgres', 'EngineVersion': '9.4.7', 'MasterUserPassword': {'Ref': 'DBPassword'}, 'Port': '5432', 'PreferredBackupWindow': '19:30-20:00', 'PreferredMaintenanceWindow': '21:30-22:00', 'PubliclyAccessible': 'false', 'StorageEncrypted': 'false', 'StorageType': 'standard', 'VPCSecurityGroups': [ { 'Fn::ImportValue': { 'Fn::Sub': [ '${SecurityStack}-SecGroup-securityGroup1', { 'SecurityStack': { 'Ref': 'SecurityStack' } } ] } } ] } }, 'ComplexRDS': { 'Type': 'AWS::RDS::DBInstance', 'Properties': { 'AllocatedStorage': '200', 'AllowMajorVersionUpgrade': 'false', 'AutoMinorVersionUpgrade': 'false', 
'BackupRetentionPeriod': '7', 'MultiAZ': 'false', 'AvailabilityZone': { 'Fn::Join': [ '', [ { 'Ref': 'AWS::Region' }, 'b' ] ] }, 'CopyTagsToSnapshot': 'true', 'DBInstanceIdentifier': 'SimpleRDSName', 'DBInstanceClass': 'db.r3.large', 'DBName': 'SimpleDBName', 'DBSubnetGroupName': { 'Fn::ImportValue': { 'Fn::Sub': [ '${RDSSubnetStack}-RDS-rdsubnet-Subnet', { 'RDSSubnetStack': { 'Ref': 'RDSSubnetStack' } } ] } }, 'DBSnapshotIdentifier': 'snapshotidtest', 'Engine': 'postgres', 'EngineVersion': '9.4.7', 'Iops': '3000', 'KmsKeyId': 'kmskeyidtest', 'Port': '5432', 'PreferredBackupWindow': '19:30-20:00', 'PreferredMaintenanceWindow': '21:30-22:00', 'PubliclyAccessible': 'false', 'StorageEncrypted': 'false', 'StorageType': 'standard', 'VPCSecurityGroups': [ { 'Fn::ImportValue': { 'Fn::Sub': [ '${SecurityStack}-SecGroup-securityGroup1', { 'SecurityStack': { 'Ref': 'SecurityStack' } } ] } }, { 'Fn::ImportValue': { 'Fn::Sub': [ '${SecurityStack}-SecGroup-securityGroup2', { 'SecurityStack': { 'Ref': 'SecurityStack' } } ] } } ], 'Tags': [ { 'Key': 'testtagkey1', 'Value': 'testtagval1' }, { 'Key': 'testtagkey2', 'Value': 'testtagval2' } ] } } } actual = templateCF(resources, 'resources') self.assertDictEqual(actual, expected) if __name__ == '__main__': unittest.main()
true
true
1c35579873b9f27211078b0748c5ed1087adda4a
6,056
py
Python
sb_service/daemons/PostProcessDaemon.py
isabella232/scorebot
71f26a73b04c419ed0ccfbe73575d4b02fa26595
[ "Apache-2.0" ]
63
2019-11-22T23:54:21.000Z
2020-10-15T16:42:34.000Z
sb_service/daemons/PostProcessDaemon.py
isabella232/scorebot
71f26a73b04c419ed0ccfbe73575d4b02fa26595
[ "Apache-2.0" ]
null
null
null
sb_service/daemons/PostProcessDaemon.py
isabella232/scorebot
71f26a73b04c419ed0ccfbe73575d4b02fa26595
[ "Apache-2.0" ]
11
2019-11-23T04:41:15.000Z
2021-06-10T15:14:21.000Z
import time import traceback from django.db import connection from django.db.backends.base.base import BaseDatabaseWrapper from django.db.utils import OperationalError from django.utils import timezone from external_tools.github.GithubConfig import GithubConfig from external_tools.github.GithubPullRequest import GithubPullRequest from common import constants from core.models import ScorebotConfig, ScorebotMetrics, PostProcessMetrics, ScorebotControlCpp, \ ScorebotControlJava, ScorebotControlKraken from sb_service.common import daemon_utils class PostProcessDaemon: def __init__(self, framework, logger): self.framework = framework self._logger = logger if self.framework == "CPP": self.ScorebotControl = ScorebotControlCpp elif self.framework == "Java": self.ScorebotControl = ScorebotControlJava elif self.framework == "Kraken": self.ScorebotControl = ScorebotControlKraken def _load_pp(self): post_process_list = self.ScorebotControl.objects.filter(post_process=True).values_list('security_category', flat=True) pr_to_process = [] for cat in post_process_list: pr_to_process += ScorebotMetrics.objects.filter(security_category=cat, framework=self.framework, post_process=False) return pr_to_process def _process_pr(self, pr): github_pull_request = GithubPullRequest() config = GithubConfig() pr_url = pr.pull_request_url github_pull_request.load(config, pr_url) if github_pull_request.state == "closed": try: # Either merged or closed merged_bool = github_pull_request.merged if merged_bool: pr_state = "merged" merged_user = github_pull_request.merged_by else: pr_state = "closed" merged_user = github_pull_request.closed_by PostProcessMetrics.objects.create(pull_request_url=pr_url, state=pr_state, closed_user=merged_user, security_category=pr.security_category, framework=self.framework) pr.post_process = True pr.save(update_fields=["post_process"]) except Exception as err: self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc()) + pr.pull_request_url) def 
_restore_db_connection(self): """ Restore db connection """ try: cursor = connection.cursor() db = cursor.db assert issubclass(db.__class__, BaseDatabaseWrapper) if db.connection is None or not db.is_usable(): db.close_if_unusable_or_obsolete() with db.wrap_database_errors: db.connect() self._logger.info('Restoring the MySQL connection') except Exception as err: self._logger.critical('DB connection error') self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc())) def run(self): last_dbalarm_time = None while True: self._logger.info("start run") self._restore_db_connection() try: sleep_time = int(ScorebotConfig.objects.filter(config="post_process_sleeptime").values()[0]['value']) except OperationalError as err: # Close the ORM DB connection, so Django can re-open it next time a DB query needs processing. connection.close() if daemon_utils.is_time_up(constants.DB_EXCEPTION_CHECK_TIME, last_dbalarm_time, True): # The overall effect of this check is to wait 15min between critical alert notifications. # Unless the DB connection is bouncing, then we get notified on every bounce. 
self._logger.info("{0}\n{1}\n".format(type(err), traceback.format_exc())) last_dbalarm_time = timezone.now() else: self._logger.error("***ORM DB Connection*** Still getting OperationalError from DB connector") continue except Exception as err: self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc())) return pr_to_process = self._load_pp() # Process PR self._logger.info("processing PRs") for pr in pr_to_process: try: self._logger.info("processing pr: " + pr.pull_request_url) # Prevent same PR from being processed pr_processed = PostProcessMetrics.objects.filter(pull_request_url=pr.pull_request_url) if pr.scorebot_mode == "silent": pr.post_process = True pr.save(update_fields=["post_process"]) elif pr_processed and pr.scorebot_mode is not "silent": pr_processed = pr_processed[0] if pr.security_category not in pr_processed.security_category: pr_processed.security_category = pr_processed.security_category + ", " + pr.security_category pr_processed.save(update_fields=["security_category"]) pr.post_process = True pr.save(update_fields=["post_process"]) else: self._process_pr(pr) last_dbalarm_time = None except Exception as err: self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc()) + pr.pull_request_url) self._logger.info("finished processing") # Sleep time.sleep(sleep_time)
47.3125
121
0.591645
import time import traceback from django.db import connection from django.db.backends.base.base import BaseDatabaseWrapper from django.db.utils import OperationalError from django.utils import timezone from external_tools.github.GithubConfig import GithubConfig from external_tools.github.GithubPullRequest import GithubPullRequest from common import constants from core.models import ScorebotConfig, ScorebotMetrics, PostProcessMetrics, ScorebotControlCpp, \ ScorebotControlJava, ScorebotControlKraken from sb_service.common import daemon_utils class PostProcessDaemon: def __init__(self, framework, logger): self.framework = framework self._logger = logger if self.framework == "CPP": self.ScorebotControl = ScorebotControlCpp elif self.framework == "Java": self.ScorebotControl = ScorebotControlJava elif self.framework == "Kraken": self.ScorebotControl = ScorebotControlKraken def _load_pp(self): post_process_list = self.ScorebotControl.objects.filter(post_process=True).values_list('security_category', flat=True) pr_to_process = [] for cat in post_process_list: pr_to_process += ScorebotMetrics.objects.filter(security_category=cat, framework=self.framework, post_process=False) return pr_to_process def _process_pr(self, pr): github_pull_request = GithubPullRequest() config = GithubConfig() pr_url = pr.pull_request_url github_pull_request.load(config, pr_url) if github_pull_request.state == "closed": try: merged_bool = github_pull_request.merged if merged_bool: pr_state = "merged" merged_user = github_pull_request.merged_by else: pr_state = "closed" merged_user = github_pull_request.closed_by PostProcessMetrics.objects.create(pull_request_url=pr_url, state=pr_state, closed_user=merged_user, security_category=pr.security_category, framework=self.framework) pr.post_process = True pr.save(update_fields=["post_process"]) except Exception as err: self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc()) + pr.pull_request_url) def _restore_db_connection(self): 
try: cursor = connection.cursor() db = cursor.db assert issubclass(db.__class__, BaseDatabaseWrapper) if db.connection is None or not db.is_usable(): db.close_if_unusable_or_obsolete() with db.wrap_database_errors: db.connect() self._logger.info('Restoring the MySQL connection') except Exception as err: self._logger.critical('DB connection error') self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc())) def run(self): last_dbalarm_time = None while True: self._logger.info("start run") self._restore_db_connection() try: sleep_time = int(ScorebotConfig.objects.filter(config="post_process_sleeptime").values()[0]['value']) except OperationalError as err: connection.close() if daemon_utils.is_time_up(constants.DB_EXCEPTION_CHECK_TIME, last_dbalarm_time, True): self._logger.info("{0}\n{1}\n".format(type(err), traceback.format_exc())) last_dbalarm_time = timezone.now() else: self._logger.error("***ORM DB Connection*** Still getting OperationalError from DB connector") continue except Exception as err: self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc())) return pr_to_process = self._load_pp() self._logger.info("processing PRs") for pr in pr_to_process: try: self._logger.info("processing pr: " + pr.pull_request_url) pr_processed = PostProcessMetrics.objects.filter(pull_request_url=pr.pull_request_url) if pr.scorebot_mode == "silent": pr.post_process = True pr.save(update_fields=["post_process"]) elif pr_processed and pr.scorebot_mode is not "silent": pr_processed = pr_processed[0] if pr.security_category not in pr_processed.security_category: pr_processed.security_category = pr_processed.security_category + ", " + pr.security_category pr_processed.save(update_fields=["security_category"]) pr.post_process = True pr.save(update_fields=["post_process"]) else: self._process_pr(pr) last_dbalarm_time = None except Exception as err: self._logger.critical("{0}\n{1}\n".format(type(err), traceback.format_exc()) + pr.pull_request_url) 
self._logger.info("finished processing") time.sleep(sleep_time)
true
true
1c3558d607658f8dea73cab624fa5807f1ade4f4
4,544
py
Python
plots.py
olihawkins/penguin-models
fabecdf6336390fc50e67cfd8494ade69fc3ef7f
[ "BSD-3-Clause" ]
1
2021-05-05T10:17:01.000Z
2021-05-05T10:17:01.000Z
plots.py
olihawkins/penguin-models
fabecdf6336390fc50e67cfd8494ade69fc3ef7f
[ "BSD-3-Clause" ]
null
null
null
plots.py
olihawkins/penguin-models
fabecdf6336390fc50e67cfd8494ade69fc3ef7f
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """A module for plotting penguins data for modelling with scikit-learn.""" # Imports --------------------------------------------------------------------- import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd # Constants ------------------------------------------------------------------- SPECIES_COLORS = { 'Adelie': '#4daf4a', 'Gentoo': '#ffb000', 'Chinstrap': '#0084f7' } X_AXIS = [30, 60] Y_AXIS = [12, 22] # Set style ------------------------------------------------------------------- # Load the style from a file plt.style.use('./style/eda.mplstyle') # Alternatively, load the style from the library in ~/.matplotlib/stylelib # plt.style.use(['eda']) # Functions ------------------------------------------------------------------- def get_contour_data(model, pipeline, n_points=1000): """Create the data used to show the boundary of the decision function.""" x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points) x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points) x0, x1 = np.meshgrid(x0s, x1s) X = np.c_[x0.ravel(), x1.ravel()] df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm']) X = pipeline.transform(df_X) y_pred = model.predict(X).reshape(x0.shape) y_decision = model.decision_function(X).reshape(x0.shape) return x0, x1, y_pred, y_decision def get_target_colors(target): """Create a dictionary of colors to use in binary classification plots.""" return { target : '#984ea3', 'Other': '#ff7f00' } # Plots ----------------------------------------------------------------------- def plot_example(): plt.style.reload_library() plt.style.use(['eda']) fig, ax = plt.subplots() ax.set_title('Some random words of the title') ax.scatter(np.random.normal(0,1,10), np.random.normal(0,1,10)) fig.savefig('plots/test.svg', format='svg') fig.savefig('plots/test.png', format='png') plt.close() def plot_target_by_features(df): """Plot the different target species.""" fig, ax = plt.subplots() ax.set_title( label='Palmer 
penguins by species and bill characteristics', loc='center') ax.get_xaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_xlim(X_AXIS[0], X_AXIS[1]) ax.set_xlabel('Bill length (mm)') ax.get_yaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_ylim(Y_AXIS[0], Y_AXIS[1]) ax.set_ylabel('Bill depth (mm)') grouped = df.groupby('species') for key, group in grouped: ax.scatter( group['bill_length_mm'], group['bill_depth_mm'], c=SPECIES_COLORS[key], s=40, label=key, alpha=0.55) ax.legend(loc='lower left', handletextpad=0.2) fig.savefig('plots/target-by-features.png', format='png') plt.close() def plot_model(df, model, pipeline, f_score, target, title, filename): """Plot the results of a binary classification model.""" fig, ax = plt.subplots() ax.set_title(title, loc='center') ax.get_xaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_xlim(X_AXIS[0], X_AXIS[1]) ax.set_xlabel('Bill length (mm)') ax.get_yaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_ylim(Y_AXIS[0], Y_AXIS[1]) ax.set_ylabel('Bill depth (mm)') # Plot the boundary of the decision function x0, x1, y_pred, y_decision = get_contour_data(model, pipeline) ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2) # This plots the decision score, if needed # ax.contourf(x0, x1, y_decision, cmap=plt.cm.PuOr, alpha=0.1) df = df.copy() df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other') colors = get_target_colors(target) grouped = df.groupby('species') for key, group in grouped: ax.scatter( group['bill_length_mm'], group['bill_depth_mm'], c=colors[key], s=40, label=key, alpha=0.55) ax.legend(loc='lower left', handletextpad=0.2) bbox_style = { 'boxstyle': 'round', 'facecolor': '#ffffff', 'edgecolor': '#d4d4d4', 'alpha': 0.8 } ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style) fig.savefig('plots/{0}.png'.format(filename), format='png') plt.close()
28.759494
79
0.574604
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd SPECIES_COLORS = { 'Adelie': '#4daf4a', 'Gentoo': '#ffb000', 'Chinstrap': '#0084f7' } X_AXIS = [30, 60] Y_AXIS = [12, 22] plt.style.use('./style/eda.mplstyle') def get_contour_data(model, pipeline, n_points=1000): x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points) x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points) x0, x1 = np.meshgrid(x0s, x1s) X = np.c_[x0.ravel(), x1.ravel()] df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm']) X = pipeline.transform(df_X) y_pred = model.predict(X).reshape(x0.shape) y_decision = model.decision_function(X).reshape(x0.shape) return x0, x1, y_pred, y_decision def get_target_colors(target): return { target : '#984ea3', 'Other': '#ff7f00' } def plot_example(): plt.style.reload_library() plt.style.use(['eda']) fig, ax = plt.subplots() ax.set_title('Some random words of the title') ax.scatter(np.random.normal(0,1,10), np.random.normal(0,1,10)) fig.savefig('plots/test.svg', format='svg') fig.savefig('plots/test.png', format='png') plt.close() def plot_target_by_features(df): fig, ax = plt.subplots() ax.set_title( label='Palmer penguins by species and bill characteristics', loc='center') ax.get_xaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_xlim(X_AXIS[0], X_AXIS[1]) ax.set_xlabel('Bill length (mm)') ax.get_yaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_ylim(Y_AXIS[0], Y_AXIS[1]) ax.set_ylabel('Bill depth (mm)') grouped = df.groupby('species') for key, group in grouped: ax.scatter( group['bill_length_mm'], group['bill_depth_mm'], c=SPECIES_COLORS[key], s=40, label=key, alpha=0.55) ax.legend(loc='lower left', handletextpad=0.2) fig.savefig('plots/target-by-features.png', format='png') plt.close() def plot_model(df, model, pipeline, f_score, target, title, filename): fig, ax = plt.subplots() ax.set_title(title, loc='center') ax.get_xaxis().set_major_formatter( 
mpl.ticker.FormatStrFormatter('%.0f')) ax.set_xlim(X_AXIS[0], X_AXIS[1]) ax.set_xlabel('Bill length (mm)') ax.get_yaxis().set_major_formatter( mpl.ticker.FormatStrFormatter('%.0f')) ax.set_ylim(Y_AXIS[0], Y_AXIS[1]) ax.set_ylabel('Bill depth (mm)') x0, x1, y_pred, y_decision = get_contour_data(model, pipeline) ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2) df = df.copy() df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other') colors = get_target_colors(target) grouped = df.groupby('species') for key, group in grouped: ax.scatter( group['bill_length_mm'], group['bill_depth_mm'], c=colors[key], s=40, label=key, alpha=0.55) ax.legend(loc='lower left', handletextpad=0.2) bbox_style = { 'boxstyle': 'round', 'facecolor': '#ffffff', 'edgecolor': '#d4d4d4', 'alpha': 0.8 } ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style) fig.savefig('plots/{0}.png'.format(filename), format='png') plt.close()
true
true
1c3558edc6a9d9cc7f4e211516fa662ff2a8b455
6,049
py
Python
stanCode_projects/name_searching_system/babygraphics.py
kenhuang1204/stanCode_projects
f697a34a1c54a864c1140cb0f2f76e2d70b45698
[ "MIT" ]
null
null
null
stanCode_projects/name_searching_system/babygraphics.py
kenhuang1204/stanCode_projects
f697a34a1c54a864c1140cb0f2f76e2d70b45698
[ "MIT" ]
null
null
null
stanCode_projects/name_searching_system/babygraphics.py
kenhuang1204/stanCode_projects
f697a34a1c54a864c1140cb0f2f76e2d70b45698
[ "MIT" ]
null
null
null
""" SC101 Baby Names Project Adapted from Nick Parlante's Baby Names assignment by Jerry Liao. YOUR DESCRIPTION HERE: babygraphics.py draws the lines and name data such as names, years, and ranks on a canvas, which illustrates how the trend of the searched names change. """ import tkinter import babynames import babygraphicsgui as gui FILENAMES = [ 'data/full/baby-1900.txt', 'data/full/baby-1910.txt', 'data/full/baby-1920.txt', 'data/full/baby-1930.txt', 'data/full/baby-1940.txt', 'data/full/baby-1950.txt', 'data/full/baby-1960.txt', 'data/full/baby-1970.txt', 'data/full/baby-1980.txt', 'data/full/baby-1990.txt', 'data/full/baby-2000.txt', 'data/full/baby-2010.txt' ] CANVAS_WIDTH = 1000 CANVAS_HEIGHT = 600 YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010] GRAPH_MARGIN_SIZE = 20 COLORS = ['red', 'purple', 'green', 'blue'] TEXT_DX = 2 LINE_WIDTH = 2 MAX_RANK = 1000 def get_x_coordinate(width, year_index): """ Given the width of the canvas and the index of the current year in the YEARS list, returns the x coordinate of the vertical line associated with that year. Input: width (int): The width of the canvas year_index (int): The index of the current year in the YEARS list Returns: x_coordinate (int): The x coordinate of the vertical line associated with the specified year. """ space = (width - GRAPH_MARGIN_SIZE * 2)/len(YEARS) x_coordinate = GRAPH_MARGIN_SIZE + space * year_index return x_coordinate def draw_fixed_lines(canvas): """ Erases all existing information on the given canvas and then draws the fixed background lines on it. Input: canvas (Tkinter Canvas): The canvas on which we are drawing. Returns: This function does not return any value. 
""" canvas.delete('all') # delete all existing lines from the canvas # Write your code below this line ################################# canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE) canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE) for i in range(len(YEARS)): x = get_x_coordinate(CANVAS_WIDTH, i) canvas.create_line(x, 0, x, CANVAS_HEIGHT) canvas.create_text(x + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i], anchor=tkinter.NW) def draw_names(canvas, name_data, lookup_names): """ Given a dict of baby name data and a list of name, plots the historical trend of those names onto the canvas. Input: canvas (Tkinter Canvas): The canvas on which we are drawing. name_data (dict): Dictionary holding baby name data lookup_names (List[str]): A list of names whose data you want to plot Returns: This function does not return any value. """ draw_fixed_lines(canvas) # draw the fixed background grid # Write your code below this line ################################# color_num = 0 for lookup_n in lookup_names: if lookup_n in name_data: color_num += 1 color_index = (color_num % len(COLORS)) - 1 year1 = YEARS[0] x1 = get_x_coordinate(CANVAS_WIDTH, 0) if f'{year1}' in name_data[lookup_n]: # According to the data, if the name exists in the data, the name's rank is not larger than 1000 rank = int(name_data[lookup_n][f'{year1}']) y1 = GRAPH_MARGIN_SIZE + (rank - 1) * (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE * 2) / 999 canvas.create_text(x1 + TEXT_DX, y1, text=f'{lookup_n} {rank}', anchor=tkinter.SW, fill=COLORS[color_index]) else: # Names in a certain year with its ranking larger than 1000 will not appear in the data y1 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE canvas.create_text(x1 + TEXT_DX, y1, text=f'{lookup_n} *', anchor=tkinter.SW, fill=COLORS[color_index]) for i in range(len(YEARS)-1): year = YEARS[i+1] x = 
get_x_coordinate(CANVAS_WIDTH, i+1) if f'{year}' in name_data[lookup_n]: # According to the data, if the name exists in the data, the name's rank is not larger than 1000 rank = int(name_data[lookup_n][f'{year}']) y = GRAPH_MARGIN_SIZE + (rank - 1) * (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE * 2) / 999 canvas.create_text(x + TEXT_DX, y, text=f'{lookup_n} {rank}', anchor=tkinter.SW, fill=COLORS[color_index]) canvas.create_line(x1, y1, x, y, width=LINE_WIDTH, fill=COLORS[color_index]) x1 = x y1 = y else: # Names in a certain year with its ranking larger than 1000 will not appear in the data y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE canvas.create_text(x + TEXT_DX, y, text=f'{lookup_n} *', anchor=tkinter.SW, fill=COLORS[color_index]) canvas.create_line(x1, y1, x, y, width=LINE_WIDTH, fill=COLORS[color_index]) x1 = x y1 = y # main() code is provided, feel free to read through it but DO NOT MODIFY def main(): # Load data name_data = babynames.read_files(FILENAMES) # Create the window and the canvas top = tkinter.Tk() top.wm_title('Baby Names') canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names) # Call draw_fixed_lines() once at startup so we have the lines # even before the user types anything. draw_fixed_lines(canvas) # This line starts the graphical loop that is responsible for # processing user interactions and plotting data top.mainloop() if __name__ == '__main__': main()
40.597315
145
0.633824
import tkinter import babynames import babygraphicsgui as gui FILENAMES = [ 'data/full/baby-1900.txt', 'data/full/baby-1910.txt', 'data/full/baby-1920.txt', 'data/full/baby-1930.txt', 'data/full/baby-1940.txt', 'data/full/baby-1950.txt', 'data/full/baby-1960.txt', 'data/full/baby-1970.txt', 'data/full/baby-1980.txt', 'data/full/baby-1990.txt', 'data/full/baby-2000.txt', 'data/full/baby-2010.txt' ] CANVAS_WIDTH = 1000 CANVAS_HEIGHT = 600 YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010] GRAPH_MARGIN_SIZE = 20 COLORS = ['red', 'purple', 'green', 'blue'] TEXT_DX = 2 LINE_WIDTH = 2 MAX_RANK = 1000 def get_x_coordinate(width, year_index): space = (width - GRAPH_MARGIN_SIZE * 2)/len(YEARS) x_coordinate = GRAPH_MARGIN_SIZE + space * year_index return x_coordinate def draw_fixed_lines(canvas): canvas.delete('all') ata, lookup_names): draw_fixed_lines(canvas) , text=f'{lookup_n} {rank}', anchor=tkinter.SW, fill=COLORS[color_index]) else: # Names in a certain year with its ranking larger than 1000 will not appear in the data y1 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE canvas.create_text(x1 + TEXT_DX, y1, text=f'{lookup_n} *', anchor=tkinter.SW, fill=COLORS[color_index]) for i in range(len(YEARS)-1): year = YEARS[i+1] x = get_x_coordinate(CANVAS_WIDTH, i+1) if f'{year}' in name_data[lookup_n]: # According to the data, if the name exists in the data, the name's rank is not larger than 1000 rank = int(name_data[lookup_n][f'{year}']) y = GRAPH_MARGIN_SIZE + (rank - 1) * (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE * 2) / 999 canvas.create_text(x + TEXT_DX, y, text=f'{lookup_n} {rank}', anchor=tkinter.SW, fill=COLORS[color_index]) canvas.create_line(x1, y1, x, y, width=LINE_WIDTH, fill=COLORS[color_index]) x1 = x y1 = y else: y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE canvas.create_text(x + TEXT_DX, y, text=f'{lookup_n} *', anchor=tkinter.SW, fill=COLORS[color_index]) canvas.create_line(x1, y1, x, y, width=LINE_WIDTH, fill=COLORS[color_index]) x1 = x y1 = y def main(): 
name_data = babynames.read_files(FILENAMES) top = tkinter.Tk() top.wm_title('Baby Names') canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names) draw_fixed_lines(canvas) top.mainloop() if __name__ == '__main__': main()
true
true
1c3559ca49687ba7b8b184f1d128f1a7deafa9c6
597
py
Python
days/d01.py
masenius/aoc-2021
e2fcfe26e17e883ddd1e61dd33ad6b391f4c0490
[ "MIT" ]
null
null
null
days/d01.py
masenius/aoc-2021
e2fcfe26e17e883ddd1e61dd33ad6b391f4c0490
[ "MIT" ]
null
null
null
days/d01.py
masenius/aoc-2021
e2fcfe26e17e883ddd1e61dd33ad6b391f4c0490
[ "MIT" ]
null
null
null
import pytest def p1(data): return sum(data[i] > data[i - 1] for i in range(1, len(data))) def p2(data): return p1([sum(data[i - 2:i + 1]) for i in range(2, len(data))]) @pytest.fixture def test_data(): return [199, 200, 208, 210, 200, 207, 240, 269, 260, 263] def test_p1(test_data): assert p1(test_data) == 7 def test_p2(test_data): assert p2(test_data) == 5 def main(): with open("inputs/d01.input") as f: data = [int(x) for x in f.read().split("\n")] print(f"P1: {p1(data)}") print(f"P2: {p2(data)}") if __name__ == "__main__": main()
17.558824
68
0.58459
import pytest def p1(data): return sum(data[i] > data[i - 1] for i in range(1, len(data))) def p2(data): return p1([sum(data[i - 2:i + 1]) for i in range(2, len(data))]) @pytest.fixture def test_data(): return [199, 200, 208, 210, 200, 207, 240, 269, 260, 263] def test_p1(test_data): assert p1(test_data) == 7 def test_p2(test_data): assert p2(test_data) == 5 def main(): with open("inputs/d01.input") as f: data = [int(x) for x in f.read().split("\n")] print(f"P1: {p1(data)}") print(f"P2: {p2(data)}") if __name__ == "__main__": main()
true
true
1c355a9e57d2a0de1f15a469085d3b3ddf19e0e7
6,130
py
Python
hplip-3.20.3/ui5/nodevicesdialog_base.py
Deril-Pana/wikiBlackcoinNL
9633307f0b485c27feae5da242944adf450e8963
[ "MIT" ]
null
null
null
hplip-3.20.3/ui5/nodevicesdialog_base.py
Deril-Pana/wikiBlackcoinNL
9633307f0b485c27feae5da242944adf450e8963
[ "MIT" ]
1
2021-11-20T16:33:39.000Z
2021-11-20T16:33:39.000Z
hplip-3.20.3/ui5/nodevicesdialog_base.py
Deril-Pana/wikiBlackcoinNL
9633307f0b485c27feae5da242944adf450e8963
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file '/home/gaurav/work_qt5/src_qt5/ui5/nodevicesdialog_base.ui' # # Created by: PyQt5 UI code generator 5.4.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_NoDevicesDialog_base(object): def setupUi(self, NoDevicesDialog_base): NoDevicesDialog_base.setObjectName("NoDevicesDialog_base") NoDevicesDialog_base.resize(539, 335) self.gridlayout = QtWidgets.QGridLayout(NoDevicesDialog_base) self.gridlayout.setContentsMargins(11, 11, 11, 11) self.gridlayout.setSpacing(6) self.gridlayout.setObjectName("gridlayout") self.Icon = QtWidgets.QLabel(NoDevicesDialog_base) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.Icon.sizePolicy().hasHeightForWidth()) self.Icon.setSizePolicy(sizePolicy) self.Icon.setFrameShape(QtWidgets.QFrame.NoFrame) self.Icon.setScaledContents(True) self.Icon.setWordWrap(False) self.Icon.setObjectName("Icon") self.gridlayout.addWidget(self.Icon, 0, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 280, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridlayout.addItem(spacerItem, 1, 0, 1, 1) spacerItem1 = QtWidgets.QSpacerItem(20, 16, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridlayout.addItem(spacerItem1, 2, 2, 1, 1) self.textLabel7 = QtWidgets.QLabel(NoDevicesDialog_base) self.textLabel7.setAlignment(QtCore.Qt.AlignVCenter) self.textLabel7.setWordWrap(True) self.textLabel7.setObjectName("textLabel7") self.gridlayout.addWidget(self.textLabel7, 0, 1, 2, 4) spacerItem2 = QtWidgets.QSpacerItem(400, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.gridlayout.addItem(spacerItem2, 3, 0, 1, 2) self.SetupButton = QtWidgets.QPushButton(NoDevicesDialog_base) self.SetupButton.setObjectName("SetupButton") 
self.gridlayout.addWidget(self.SetupButton, 3, 2, 1, 1) self.CUPSButton = QtWidgets.QPushButton(NoDevicesDialog_base) self.CUPSButton.setObjectName("CUPSButton") self.gridlayout.addWidget(self.CUPSButton, 3, 3, 1, 1) self.CloseButton = QtWidgets.QPushButton(NoDevicesDialog_base) self.CloseButton.setDefault(True) self.CloseButton.setObjectName("CloseButton") self.gridlayout.addWidget(self.CloseButton, 3, 4, 1, 1) self.retranslateUi(NoDevicesDialog_base) QtCore.QMetaObject.connectSlotsByName(NoDevicesDialog_base) def retranslateUi(self, NoDevicesDialog_base): _translate = QtCore.QCoreApplication.translate NoDevicesDialog_base.setWindowTitle(_translate("NoDevicesDialog_base", "HP Device Manager - No Installed HP Devices Found")) self.textLabel7.setText(_translate("NoDevicesDialog_base", "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:x-large; font-weight:600;\">No Installed HP Devices Found.</span></p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">To setup a new device in the HP Device Manager (toolbox), use one of the following methods:</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">1.Run <span style=\" font-weight:600;\">hp-setup</span> (in a shell/terminal or click <span style=\" font-family:\'Courier New,courier\';\">Setup Device...</span> below).</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">2. 
<span style=\" font-weight:600;\">CUPS web interface</span> (open a browser to: <span style=\" text-decoration: underline;\">http://localhost:631</span> or press the button below),</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">3. The <span style=\" font-weight:600;\">printer installation utility</span> that came with your operating system (YaST, PrinterDrake, etc). </p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">After setting up a printer, you may have to press <span style=\" font-family:\'Courier New,courier\';\">F6</span> or chose <span style=\" font-family:\'Courier New,courier\';\">Device | Refresh All</span> for the printer to appear in the HP Device Manager.</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600; font-style:italic;\">Note: Only devices installed with the </span><span style=\" font-family:\'Courier New,courier\'; font-weight:600; font-style:italic;\">hp:</span><span style=\" font-weight:600; font-style:italic;\"> or </span><span style=\" font-family:\'Courier New,courier\'; font-weight:600; font-style:italic;\">hpfax:</span><span style=\" font-weight:600; font-style:italic;\"> CUPS backend will appear in the HP Device Manager.</span></p>\n" "<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p></body></html>")) self.SetupButton.setText(_translate("NoDevicesDialog_base", "Setup Device...")) self.CUPSButton.setText(_translate("NoDevicesDialog_base", "CUPS Web Interface")) self.CloseButton.setText(_translate("NoDevicesDialog_base", "Close"))
83.972603
607
0.717455
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_NoDevicesDialog_base(object): def setupUi(self, NoDevicesDialog_base): NoDevicesDialog_base.setObjectName("NoDevicesDialog_base") NoDevicesDialog_base.resize(539, 335) self.gridlayout = QtWidgets.QGridLayout(NoDevicesDialog_base) self.gridlayout.setContentsMargins(11, 11, 11, 11) self.gridlayout.setSpacing(6) self.gridlayout.setObjectName("gridlayout") self.Icon = QtWidgets.QLabel(NoDevicesDialog_base) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.Icon.sizePolicy().hasHeightForWidth()) self.Icon.setSizePolicy(sizePolicy) self.Icon.setFrameShape(QtWidgets.QFrame.NoFrame) self.Icon.setScaledContents(True) self.Icon.setWordWrap(False) self.Icon.setObjectName("Icon") self.gridlayout.addWidget(self.Icon, 0, 0, 1, 1) spacerItem = QtWidgets.QSpacerItem(20, 280, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridlayout.addItem(spacerItem, 1, 0, 1, 1) spacerItem1 = QtWidgets.QSpacerItem(20, 16, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridlayout.addItem(spacerItem1, 2, 2, 1, 1) self.textLabel7 = QtWidgets.QLabel(NoDevicesDialog_base) self.textLabel7.setAlignment(QtCore.Qt.AlignVCenter) self.textLabel7.setWordWrap(True) self.textLabel7.setObjectName("textLabel7") self.gridlayout.addWidget(self.textLabel7, 0, 1, 2, 4) spacerItem2 = QtWidgets.QSpacerItem(400, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.gridlayout.addItem(spacerItem2, 3, 0, 1, 2) self.SetupButton = QtWidgets.QPushButton(NoDevicesDialog_base) self.SetupButton.setObjectName("SetupButton") self.gridlayout.addWidget(self.SetupButton, 3, 2, 1, 1) self.CUPSButton = QtWidgets.QPushButton(NoDevicesDialog_base) self.CUPSButton.setObjectName("CUPSButton") self.gridlayout.addWidget(self.CUPSButton, 3, 3, 1, 1) self.CloseButton = 
QtWidgets.QPushButton(NoDevicesDialog_base) self.CloseButton.setDefault(True) self.CloseButton.setObjectName("CloseButton") self.gridlayout.addWidget(self.CloseButton, 3, 4, 1, 1) self.retranslateUi(NoDevicesDialog_base) QtCore.QMetaObject.connectSlotsByName(NoDevicesDialog_base) def retranslateUi(self, NoDevicesDialog_base): _translate = QtCore.QCoreApplication.translate NoDevicesDialog_base.setWindowTitle(_translate("NoDevicesDialog_base", "HP Device Manager - No Installed HP Devices Found")) self.textLabel7.setText(_translate("NoDevicesDialog_base", "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n" "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:x-large; font-weight:600;\">No Installed HP Devices Found.</span></p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">To setup a new device in the HP Device Manager (toolbox), use one of the following methods:</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">1.Run <span style=\" font-weight:600;\">hp-setup</span> (in a shell/terminal or click <span style=\" font-family:\'Courier New,courier\';\">Setup Device...</span> below).</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">2. <span style=\" font-weight:600;\">CUPS web interface</span> (open a browser to: <span style=\" text-decoration: underline;\">http://localhost:631</span> or press the button below),</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">3. 
The <span style=\" font-weight:600;\">printer installation utility</span> that came with your operating system (YaST, PrinterDrake, etc). </p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">After setting up a printer, you may have to press <span style=\" font-family:\'Courier New,courier\';\">F6</span> or chose <span style=\" font-family:\'Courier New,courier\';\">Device | Refresh All</span> for the printer to appear in the HP Device Manager.</p>\n" "<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600; font-style:italic;\">Note: Only devices installed with the </span><span style=\" font-family:\'Courier New,courier\'; font-weight:600; font-style:italic;\">hp:</span><span style=\" font-weight:600; font-style:italic;\"> or </span><span style=\" font-family:\'Courier New,courier\'; font-weight:600; font-style:italic;\">hpfax:</span><span style=\" font-weight:600; font-style:italic;\"> CUPS backend will appear in the HP Device Manager.</span></p>\n" "<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p></body></html>")) self.SetupButton.setText(_translate("NoDevicesDialog_base", "Setup Device...")) self.CUPSButton.setText(_translate("NoDevicesDialog_base", "CUPS Web Interface")) self.CloseButton.setText(_translate("NoDevicesDialog_base", "Close"))
true
true
1c355b35c10d1e9bedeeb5e047f7d7b8d18570ba
274
py
Python
tea_admin/tea_admin/doctype/salary_structure/test_salary_structure.py
nivedita05/Tea-Admin
4267cfa9d0c97e586bd9b5401e310bcd8ec93268
[ "MIT" ]
null
null
null
tea_admin/tea_admin/doctype/salary_structure/test_salary_structure.py
nivedita05/Tea-Admin
4267cfa9d0c97e586bd9b5401e310bcd8ec93268
[ "MIT" ]
null
null
null
tea_admin/tea_admin/doctype/salary_structure/test_salary_structure.py
nivedita05/Tea-Admin
4267cfa9d0c97e586bd9b5401e310bcd8ec93268
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest # test_records = frappe.get_test_records('Salary Structure') class TestSalaryStructure(unittest.TestCase): pass
21.076923
60
0.777372
from __future__ import unicode_literals import frappe import unittest class TestSalaryStructure(unittest.TestCase): pass
true
true
1c355c32ed7f483f2ec6556256cbeccaa11ab0bf
1,488
py
Python
python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py
L-Net-1992/Paddle
4d0ca02ba56760b456f3d4b42a538555b9b6c307
[ "Apache-2.0" ]
11
2016-08-29T07:43:26.000Z
2016-08-29T07:51:24.000Z
python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py
L-Net-1992/Paddle
4d0ca02ba56760b456f3d4b42a538555b9b6c307
[ "Apache-2.0" ]
null
null
null
python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py
L-Net-1992/Paddle
4d0ca02ba56760b456f3d4b42a538555b9b6c307
[ "Apache-2.0" ]
1
2021-12-09T08:59:17.000Z
2021-12-09T08:59:17.000Z
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import paddle.fluid as fluid fluid.core._set_fuse_parameter_group_size(3) fluid.core._set_fuse_parameter_memory_size(131072) import unittest import seresnext_net from seresnext_test_base import TestResnetBase, DeviceType from functools import partial class TestResnetWithFuseAllReduceCPU(TestResnetBase): def test_seresnext_with_fused_all_reduce(self): # NOTE(zcd): In order to make the program faster, # this unit test remove drop_out and batch_norm. check_func = partial(self.check_network_convergence, optimizer=seresnext_net.optimizer, fuse_all_reduce_ops=True) self._compare_result_with_origin_model(check_func, use_device=DeviceType.CPU) if __name__ == '__main__': unittest.main()
36.292683
74
0.731855
from __future__ import print_function import paddle.fluid as fluid fluid.core._set_fuse_parameter_group_size(3) fluid.core._set_fuse_parameter_memory_size(131072) import unittest import seresnext_net from seresnext_test_base import TestResnetBase, DeviceType from functools import partial class TestResnetWithFuseAllReduceCPU(TestResnetBase): def test_seresnext_with_fused_all_reduce(self): check_func = partial(self.check_network_convergence, optimizer=seresnext_net.optimizer, fuse_all_reduce_ops=True) self._compare_result_with_origin_model(check_func, use_device=DeviceType.CPU) if __name__ == '__main__': unittest.main()
true
true
1c355c88e357d81dc7fa2ee4552113bcf39d70b6
264
py
Python
application/docs/views.py
meshack-mbuvi/ride-my-way-api
2e091f020e774cad4891427a2de91a7a2c29def1
[ "MIT" ]
null
null
null
application/docs/views.py
meshack-mbuvi/ride-my-way-api
2e091f020e774cad4891427a2de91a7a2c29def1
[ "MIT" ]
19
2018-06-23T17:08:37.000Z
2018-07-23T17:37:55.000Z
application/docs/views.py
meshack-mbuvi/ride-my-way-api
2e091f020e774cad4891427a2de91a7a2c29def1
[ "MIT" ]
null
null
null
from flask import Blueprint, render_template docs = Blueprint('docs', __name__, static_folder='static', template_folder='templates') @docs.route('/api/v1/docs') def index(): "Show an index template" return render_template('docs.html')
22
58
0.685606
from flask import Blueprint, render_template docs = Blueprint('docs', __name__, static_folder='static', template_folder='templates') @docs.route('/api/v1/docs') def index(): return render_template('docs.html')
true
true
1c355f1ba7b5ff636b60f1fae401e09fa7f4783c
2,938
py
Python
lambda/s3trigger.py
CurryEleison/lambda-metrics
0954273a1aec0f8c8ea867383e33a9acc9c785cf
[ "MIT" ]
null
null
null
lambda/s3trigger.py
CurryEleison/lambda-metrics
0954273a1aec0f8c8ea867383e33a9acc9c785cf
[ "MIT" ]
null
null
null
lambda/s3trigger.py
CurryEleison/lambda-metrics
0954273a1aec0f8c8ea867383e33a9acc9c785cf
[ "MIT" ]
null
null
null
import boto3 import numpy as np import logging import pandas as pd import datetime from AwsElbLogUtil import LogDataFrame from CloudWatchUtil import CustomMetricSender logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def send_random(event, context): data = np.random.randn(5) logging.info(data) def send_metrics_tpltest(event, context): objlist = [] client = boto3.resource('s3') for record in event['Records']: objlist.append( client.ObjectSummary( record['s3']['bucket']['name'], record['s3']['object']['key'] ) ) # bucket = record['s3']['bucket']['name'] # keds.append(record['s3']['object']['key']) #download_path = '/tmp/{}{}'.format(uuid.uuid4(), key) #s3_client.download_file(bucket, key, download_path) dfmaker = LogDataFrame(client) for s3objsummary in objlist: logging.info(s3objsummary.key) df = dfmaker.make_dataframe(objlist, lambda l: hasattr(l, 'path') and l.path.startswith('/tpltest')) # urltimetaken = df[['path', 'servertime']].groupby('path').sum().sort_values('servertime', ascending=False) #.agg({'servertime', 'sum'}) # print urltimetaken.head(10) df['roundedtime'] = df['utctime'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute)) df = df.assign(mintime = df.servertime).assign(maxtime = df.servertime).assign(sumtime = df.servertime).assign(reccount = df.method) summary = df.groupby('roundedtime').agg( { 'maxtime': 'max', 'mintime': 'min', 'sumtime': 'sum', 'reccount': 'count' } ) sender = CustomMetricSender('ExperimentalCustom', 'TpltestTimings') for index, item in summary.iterrows(): logging.info("At {4} Min: {0}, Max: {1}, Sum: {2}, Count: {3}".format(item['mintime'], item['maxtime'], item['sumtime'], item['reccount'], index)) resp = sender.senddataaggregate(datatime = index, datalength = item['reccount'], datasum = item['sumtime'], datamin = item['mintime'], datamax = item['maxtime']) logging.info(resp)
51.54386
208
0.465623
import boto3 import numpy as np import logging import pandas as pd import datetime from AwsElbLogUtil import LogDataFrame from CloudWatchUtil import CustomMetricSender logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def send_random(event, context): data = np.random.randn(5) logging.info(data) def send_metrics_tpltest(event, context): objlist = [] client = boto3.resource('s3') for record in event['Records']: objlist.append( client.ObjectSummary( record['s3']['bucket']['name'], record['s3']['object']['key'] ) ) dfmaker = LogDataFrame(client) for s3objsummary in objlist: logging.info(s3objsummary.key) df = dfmaker.make_dataframe(objlist, lambda l: hasattr(l, 'path') and l.path.startswith('/tpltest')) = df['utctime'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute)) df = df.assign(mintime = df.servertime).assign(maxtime = df.servertime).assign(sumtime = df.servertime).assign(reccount = df.method) summary = df.groupby('roundedtime').agg( { 'maxtime': 'max', 'mintime': 'min', 'sumtime': 'sum', 'reccount': 'count' } ) sender = CustomMetricSender('ExperimentalCustom', 'TpltestTimings') for index, item in summary.iterrows(): logging.info("At {4} Min: {0}, Max: {1}, Sum: {2}, Count: {3}".format(item['mintime'], item['maxtime'], item['sumtime'], item['reccount'], index)) resp = sender.senddataaggregate(datatime = index, datalength = item['reccount'], datasum = item['sumtime'], datamin = item['mintime'], datamax = item['maxtime']) logging.info(resp)
true
true
1c355f6286858803c3e1246758b476717339cf14
270,565
py
Python
cinder/db/sqlalchemy/api.py
elastx/cinder
00519aef92258275b68b78355734cda31c354839
[ "Apache-2.0" ]
3
2015-04-02T21:44:36.000Z
2016-04-29T21:19:04.000Z
cinder/db/sqlalchemy/api.py
elastx/cinder
00519aef92258275b68b78355734cda31c354839
[ "Apache-2.0" ]
3
2016-04-29T21:45:26.000Z
2016-05-04T19:41:23.000Z
cinder/db/sqlalchemy/api.py
elastx/cinder
00519aef92258275b68b78355734cda31c354839
[ "Apache-2.0" ]
4
2016-01-27T00:25:52.000Z
2021-03-25T19:54:08.000Z
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import collections from collections import abc import datetime as dt import functools import itertools import re import sys import uuid from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db import options from oslo_db.sqlalchemy import enginefacade from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') import six import sqlalchemy from sqlalchemy import MetaData from sqlalchemy import or_, and_, case from sqlalchemy.orm import joinedload, joinedload_all, undefer_group, load_only from sqlalchemy.orm import RelationshipProperty from sqlalchemy import sql from sqlalchemy.sql.expression import bindparam from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import true from sqlalchemy.sql import func from sqlalchemy.sql import sqltypes from cinder.api import common from cinder.common import sqlalchemyutils from cinder import db from cinder.db.sqlalchemy import models 
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import volume_utils


CONF = cfg.CONF
LOG = logging.getLogger(__name__)

options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')

# Single transaction context manager shared by all DB API calls in this
# module; configured once at service startup via configure().
main_context_manager = enginefacade.transaction_context()


def configure(conf):
    """Configure the enginefacade context manager from [database] options."""
    main_context_manager.configure(**dict(conf.database))
    # NOTE(geguileo): To avoid a cyclical dependency we import the
    # group here.  Dependency cycle is objects.base requires db.api,
    # which requires db.sqlalchemy.api, which requires service which
    # requires objects.base
    CONF.import_group("profiler", "cinder.service")
    if CONF.profiler.enabled:
        if CONF.profiler.trace_sqlalchemy:
            # NOTE(review): this bare lambda expression is never invoked or
            # registered anywhere, so SQLAlchemy tracing is effectively a
            # no-op here.  It presumably should be handed to an
            # engine-creation hook (e.g. enginefacade's
            # append_on_engine_create) -- TODO confirm against the
            # oslo.db/osprofiler integration docs.
            lambda eng: osprofiler_sqlalchemy.add_tracing(sqlalchemy,
                                                          eng, "db")


def get_engine(use_slave=False):
    """Return the legacy-facade engine (optionally the slave engine)."""
    return main_context_manager._factory.get_legacy_facade().get_engine(
        use_slave=use_slave)


def get_session(use_slave=False, **kwargs):
    """Return a new session from the legacy facade."""
    return main_context_manager._factory.get_legacy_facade().get_session(
        use_slave=use_slave, **kwargs)


def dispose_engine():
    """Dispose of the engine's connection pool."""
    get_engine().dispose()


# Name of the row in quota_classes that stores the default limits.
_DEFAULT_QUOTA_NAME = 'default'


def get_backend():
    """The backend is this module itself."""
    return sys.modules[__name__]


def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        raise exception.CinderException(
            'Use of empty request context is deprecated')
    return context.is_admin


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if is_user_context(context):
        if not context.project_id:
            raise exception.NotAuthorized()
        elif context.project_id != project_id:
            raise exception.NotAuthorized()


def 
authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if is_user_context(context):
        if not context.user_id:
            raise exception.NotAuthorized()
        elif context.user_id != user_id:
            raise exception.NotAuthorized()


def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if is_user_context(context):
        if not context.quota_class:
            raise exception.NotAuthorized()
        elif context.quota_class != class_name:
            raise exception.NotAuthorized()


def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.

    """
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]):
            raise exception.AdminRequired()
        return f(*args, **kwargs)
    return wrapper


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.

    """
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]) and not is_user_context(args[0]):
            raise exception.NotAuthorized()
        return f(*args, **kwargs)
    return wrapper


def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    Requires the wrapped function to use context and volume_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, volume_id, *args, **kwargs):
        if not resource_exists(context, models.Volume, volume_id):
            raise exception.VolumeNotFound(volume_id=volume_id)
        return f(context, volume_id, *args, **kwargs)
    return wrapper


def require_snapshot_exists(f):
    """Decorator to require the specified snapshot to exist.

    Requires the wrapped function to use context and snapshot_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, snapshot_id, *args, **kwargs):
        if not resource_exists(context, models.Snapshot, snapshot_id):
            raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
        return f(context, snapshot_id, *args, **kwargs)
    return wrapper


def require_backup_exists(f):
    """Decorator to require the specified backup to exist.

    Requires the wrapped function to use context and backup_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, backup_id, *args, **kwargs):
        if not resource_exists(context, models.Backup, backup_id):
            raise exception.BackupNotFound(backup_id=backup_id)
        return f(context, backup_id, *args, **kwargs)
    return wrapper


def handle_db_data_error(f):
    """Decorator translating DBDataError into cinder's Invalid exception."""
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except db_exc.DBDataError:
            msg = _('Error writing field to database')
            LOG.exception(msg)
            raise exception.Invalid(msg)

    return wrapper


def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param model: Model to query. Must be a subclass of ModelBase.
    :param args: Arguments to query. If None - model is used.
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
""" session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(model, *args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) elif read_deleted == 'int_no': query = query.filter_by(deleted=0) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): if model is models.VolumeAttachment: # NOTE(dulek): In case of VolumeAttachment, we need to join # `project_id` through `volume` relationship. query = query.filter(models.Volume.project_id == context.project_id) else: query = query.filter_by(project_id=context.project_id) return query def _sync_volumes(context, project_id, session, volume_type_id=None, volume_type_name=None): (volumes, _gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'volumes' if volume_type_name: key += '_' + volume_type_name return {key: volumes} def _sync_snapshots(context, project_id, session, volume_type_id=None, volume_type_name=None): (snapshots, _gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'snapshots' if volume_type_name: key += '_' + volume_type_name return {key: snapshots} def _sync_backups(context, project_id, session, volume_type_id=None, volume_type_name=None): (backups, _gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'backups' return {key: backups} def _sync_gigabytes(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, vol_gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'gigabytes' if volume_type_name: 
key += '_' + volume_type_name if CONF.no_snapshot_gb_quota: return {key: vol_gigs} (_junk, snap_gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: vol_gigs + snap_gigs} def _sync_consistencygroups(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, groups) = _consistencygroup_data_get_for_project( context, project_id, session=session) key = 'consistencygroups' return {key: groups} def _sync_groups(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, groups) = _group_data_get_for_project( context, project_id, session=session) key = 'groups' return {key: groups} def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None, volume_type_name=None): key = 'backup_gigabytes' (_junk, backup_gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: backup_gigs} QUOTA_SYNC_FUNCTIONS = { '_sync_volumes': _sync_volumes, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, '_sync_consistencygroups': _sync_consistencygroups, '_sync_backups': _sync_backups, '_sync_backup_gigabytes': _sync_backup_gigabytes, '_sync_groups': _sync_groups, } ################### def _clean_filters(filters): return {k: v for k, v in filters.items() if v is not None} def _filter_host(field, value, match_level=None): """Generate a filter condition for host and cluster fields. Levels are: - 'pool': Will search for an exact match - 'backend': Will search for exact match and value#* - 'host'; Will search for exact match, value@* and value#* If no level is provided we'll determine it based on the value we want to match: - 'pool': If '#' is present in value - 'backend': If '@' is present in value and '#' is not present - 'host': In any other case :param field: ORM field. 
Ex: objects.Volume.model.host
    :param value: String to compare with
    :param match_level: 'pool', 'backend', or 'host'
    """
    # If we don't set level we'll try to determine it automatically. LIKE
    # operations are expensive, so we try to reduce them to the minimum.
    if match_level is None:
        if '#' in value:
            match_level = 'pool'
        elif '@' in value:
            match_level = 'backend'
        else:
            match_level = 'host'

    # Mysql is not doing case sensitive filtering, so we force it
    conn_str = CONF.database.connection
    if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']:
        cmp_value = func.binary(value)
        like_op = 'LIKE BINARY'
    else:
        cmp_value = value
        like_op = 'LIKE'

    conditions = [field == cmp_value]
    if match_level != 'pool':
        conditions.append(field.op(like_op)(value + '#%'))
        if match_level == 'host':
            conditions.append(field.op(like_op)(value + '@%'))

    return or_(*conditions)


def _filter_time_comparison(field, time_filter_dict):
    """Generate a filter condition for time comparison operators"""
    conditions = []
    for operator in time_filter_dict:
        filter_time = timeutils.normalize_time(time_filter_dict[operator])
        # Operators are tested in independent if/elif pairs; since each dict
        # key is exactly one operator string the result is equivalent to a
        # single if/elif chain.
        if operator == 'gt':
            conditions.append(field.op('>')(filter_time))
        elif operator == 'gte':
            conditions.append(field.op('>=')(filter_time))
        if operator == 'eq':
            conditions.append(field.op('=')(filter_time))
        elif operator == 'neq':
            conditions.append(field.op('!=')(filter_time))
        if operator == 'lt':
            conditions.append(field.op('<')(filter_time))
        elif operator == 'lte':
            conditions.append(field.op('<=')(filter_time))
    return or_(*conditions)


def _clustered_bool_field_filter(query, field_name, filter_value):
    # Now that we have clusters, a service is disabled/frozen if the service
    # doesn't belong to a cluster or if it belongs to a cluster and the
    # cluster itself is disabled/frozen.
    if filter_value is not None:
        query_filter = or_(
            and_(models.Service.cluster_name.is_(None),
                 getattr(models.Service, field_name)),
            and_(models.Service.cluster_name.isnot(None),
                 sql.exists().where(and_(
                     models.Cluster.name == models.Service.cluster_name,
                     models.Cluster.binary == models.Service.binary,
                     ~models.Cluster.deleted,
                     getattr(models.Cluster, field_name)))))
        # A falsy filter_value means "give me the services where the flag is
        # NOT set", so negate the whole condition.
        if not filter_value:
            query_filter = ~query_filter
        query = query.filter(query_filter)
    return query


def _service_query(context, session=None, read_deleted='no', host=None,
                   cluster_name=None, is_up=None, host_or_cluster=None,
                   backend_match_level=None, disabled=None, frozen=None,
                   **filters):
    # Build (but do not execute) the common Service query; returns None when
    # an unknown model filter is requested so callers can short-circuit.
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(models.Service, filters):
        return None

    query = model_query(context, models.Service, session=session,
                        read_deleted=read_deleted)

    # Host and cluster are particular cases of filters, because we must
    # retrieve not only exact matches (single backend configuration), but also
    # match those that have the backend defined (multi backend configuration).
    if host:
        query = query.filter(_filter_host(models.Service.host, host,
                                          backend_match_level))
    if cluster_name:
        query = query.filter(_filter_host(models.Service.cluster_name,
                                          cluster_name, backend_match_level))
    if host_or_cluster:
        query = query.filter(or_(
            _filter_host(models.Service.host, host_or_cluster,
                         backend_match_level),
            _filter_host(models.Service.cluster_name, host_or_cluster,
                         backend_match_level),
        ))

    query = _clustered_bool_field_filter(query, 'disabled', disabled)
    query = _clustered_bool_field_filter(query, 'frozen', frozen)

    if filters:
        query = query.filter_by(**filters)

    if is_up is not None:
        # A service counts as "up" when either timestamp is newer than the
        # expiry limit; comparing the boolean expression against is_up lets
        # the same filter select either up or down services.
        date_limit = utils.service_expired_time()
        svc = models.Service
        filter_ = or_(
            and_(svc.created_at.isnot(None), svc.created_at >= date_limit),
            and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit))
        query = query.filter(filter_ == is_up)

    return query


@require_admin_context
def service_destroy(context, service_id):
    # Soft-delete the service row; raises ServiceNotFound when nothing
    # matched.  Returns the values that were applied by the soft delete.
    query = _service_query(context, id=service_id)
    updated_values = models.Service.delete_values()
    if not query.update(updated_values):
        raise exception.ServiceNotFound(service_id=service_id)
    return updated_values


@require_admin_context
def service_get(context, service_id=None, backend_match_level=None, **filters):
    """Get a service that matches the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param service_id: Id of the service.
    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    :raise ServiceNotFound: If service doesn't exist.
    """
    query = _service_query(context, backend_match_level=backend_match_level,
                           id=service_id, **filters)
    service = None if not query else query.first()
    if not service:
        serv_id = service_id or filters.get('topic') or filters.get('binary')
        raise exception.ServiceNotFound(service_id=serv_id,
                                        host=filters.get('host'))
    return service


@require_admin_context
def service_get_all(context, backend_match_level=None, **filters):
    """Get all services that match the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    """
    query = _service_query(context, backend_match_level=backend_match_level,
                           **filters)
    return [] if not query else query.all()


@require_admin_context
def service_get_by_uuid(context, service_uuid):
    # Direct lookup by UUID; raises ServiceNotFound when absent.
    query = model_query(context, models.Service).filter_by(uuid=service_uuid)
    result = query.first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_uuid)
    return result


@require_admin_context
def service_create(context, values):
    # New services start disabled unless enable_new_services is configured.
    service_ref = models.Service()
    service_ref.update(values)
    if not CONF.enable_new_services:
        service_ref.disabled = True

    session = get_session()
    with session.begin():
        service_ref.save(session)
        return service_ref


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def service_update(context, service_id, values):
    # When toggling 'disabled' also bump modified_at/updated_at so readers
    # can distinguish a configuration change from an ordinary heartbeat.
    if 'disabled' in values:
        values = values.copy()
        values['modified_at'] = values.get('modified_at', timeutils.utcnow())
        values['updated_at'] = values.get('updated_at',
                                          literal_column('updated_at'))
    query = _service_query(context, id=service_id)
    result = query.update(values)
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)


@enginefacade.writer
def untyped_volumes_online_data_migration(context, max_count):
    # Online data migration: assign the __DEFAULT__ volume type to volumes
    # created before volume types became mandatory.
    from cinder.volume import volume_types
    default_type = 
volume_types.get_volume_type_by_name(context, '__DEFAULT__') # get all volumes having volume_type=None total = 0 updated = 0 session = get_session() with session.begin(): total = model_query(context, models.Volume, session=session).filter_by( volume_type_id=None).limit(max_count).count() volumes = model_query(context, models.Volume, session=session).filter_by( volume_type_id=None).limit(max_count).all() for volume in volumes: volume.volume_type_id = default_type.get('id') updated += 1 return total, updated @enginefacade.writer def untyped_snapshots_online_data_migration(context, max_count): from cinder.volume import volume_types default_type = volume_types.get_volume_type_by_name(context, '__DEFAULT__') # get all snapshots having volume_type=None total = 0 updated = 0 session = get_session() with session.begin(): total = model_query(context, models.Snapshot, session=session).filter_by( volume_type_id=None).limit(max_count).count() snapshots = model_query(context, models.Snapshot, session=session).filter_by( volume_type_id=None).limit(max_count).all() for snapshot in snapshots: snapshot.volume_type_id = default_type.get('id') updated += 1 return total, updated ################### @require_admin_context def is_backend_frozen(context, host, cluster_name): """Check if a storage backend is frozen based on host and cluster_name.""" if cluster_name: model = models.Cluster conditions = [model.name == volume_utils.extract_host(cluster_name)] else: model = models.Service conditions = [model.host == volume_utils.extract_host(host)] conditions.extend((~model.deleted, model.frozen)) query = get_session().query(sql.exists().where(and_(*conditions))) frozen = query.scalar() return frozen ################### def _cluster_query(context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, name=None, session=None, **filters): filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Cluster, filters): return None 
query = model_query(context, models.Cluster, session=session, read_deleted=read_deleted) # Cluster is a special case of filter, because we must match exact match # as well as hosts that specify the backend if name: query = query.filter(_filter_host(models.Cluster.name, name, name_match_level)) if filters: query = query.filter_by(**filters) if services_summary: query = query.options(undefer_group('services_summary')) # We bind the expiration time to now (as it changes with each query) # and is required by num_down_hosts query = query.params(expired=utils.service_expired_time()) elif 'num_down_hosts' in filters: query = query.params(expired=utils.service_expired_time()) if get_services: query = query.options(joinedload_all('services')) if is_up is not None: date_limit = utils.service_expired_time() filter_ = and_(models.Cluster.last_heartbeat.isnot(None), models.Cluster.last_heartbeat >= date_limit) query = query.filter(filter_ == is_up) return query @require_admin_context def cluster_get(context, id=None, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, **filters): """Get a cluster that matches the criteria. :param id: Id of the cluster. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_hosts and num_down_hosts fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param filters: Field based filters in the form of key/value. :param name_match_level: 'pool', 'backend', or 'host' for name filter (as defined in _filter_host method) :raise ClusterNotFound: If cluster doesn't exist. 
""" query = _cluster_query(context, is_up, get_services, services_summary, read_deleted, name_match_level, id=id, **filters) cluster = None if not query else query.first() if not cluster: cluster_id = id or six.text_type(filters) raise exception.ClusterNotFound(id=cluster_id) return cluster @require_admin_context def cluster_get_all(context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, **filters): """Get all clusters that match the criteria. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_hosts and num_down_hosts fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param name_match_level: 'pool', 'backend', or 'host' for name filter (as defined in _filter_host method) :param filters: Field based filters in the form of key/value. """ query = _cluster_query(context, is_up, get_services, services_summary, read_deleted, name_match_level, **filters) return [] if not query else query.all() @require_admin_context def cluster_create(context, values): """Create a cluster from the values dictionary.""" cluster_ref = models.Cluster() cluster_ref.update(values) # Provided disabled value takes precedence if values.get('disabled') is None: cluster_ref.disabled = not CONF.enable_new_services session = get_session() try: with session.begin(): cluster_ref.save(session) # We mark that newly created cluster has no hosts to prevent # problems at the OVO level cluster_ref.last_heartbeat = None return cluster_ref # If we had a race condition (another non deleted cluster exists with the # same name) raise Duplicate exception. 
except db_exc.DBDuplicateEntry: raise exception.ClusterExists(name=values.get('name')) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def cluster_update(context, id, values): """Set the given properties on an cluster and update it. Raises ClusterNotFound if cluster does not exist. """ query = _cluster_query(context, id=id) result = query.update(values) if not result: raise exception.ClusterNotFound(id=id) @require_admin_context def cluster_destroy(context, id): """Destroy the cluster or raise if it does not exist or has hosts.""" query = _cluster_query(context, id=id) query = query.filter(models.Cluster.num_hosts == 0) # If the update doesn't succeed we don't know if it's because the # cluster doesn't exist or because it has hosts. result = query.update(models.Cluster.delete_values(), synchronize_session=False) if not result: # This will fail if the cluster doesn't exist raising the right # exception cluster_get(context, id=id) # If it doesn't fail, then the problem is that there are hosts raise exception.ClusterHasHosts(id=id) ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _dict_with_extra_specs_if_authorized(context, inst_type_query): """Convert type query result to dict with extra_spec and rate_limit. Takes a volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts. NOTE the contents of extra-specs are admin readable only. If the context passed in for this request is not admin then we will return an empty extra-specs dict rather than providing the admin only details. Example response with admin context: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
to a single dict:

    'extra_specs' : {'k1': 'v1'}
    """
    inst_type_dict = dict(inst_type_query)
    # NOTE(review): despite the docstring, no admin check is performed here;
    # extra_specs are returned for any context.  Compare with
    # _dict_with_group_specs_if_authorized in this module, which does strip
    # group_specs for non-admins.  Docstring and behavior should be
    # reconciled -- TODO confirm the intended policy.
    extra_specs = {x['key']: x['value']
                   for x in inst_type_query['extra_specs']}
    inst_type_dict['extra_specs'] = extra_specs
    return inst_type_dict


###################


def _dict_with_group_specs_if_authorized(context, inst_type_query):
    """Convert group type query result to dict with spec and rate_limit.

    Takes a group type query returned by sqlalchemy and returns it
    as a dictionary, converting the extra_specs entry from a list
    of dicts.  NOTE the contents of extra-specs are admin readable
    only.  If the context passed in for this request is not admin
    then we will return an empty extra-specs dict rather than
    providing the admin only details.

    Example response with admin context:

    'group_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]

    to a single dict:

    'group_specs' : {'k1': 'v1'}
    """
    inst_type_dict = dict(inst_type_query)
    if not is_admin_context(context):
        del(inst_type_dict['group_specs'])
    else:
        group_specs = {x['key']: x['value']
                       for x in inst_type_query['group_specs']}
        inst_type_dict['group_specs'] = group_specs
    return inst_type_dict


###################


@require_context
def _quota_get(context, project_id, resource, session=None):
    # Fetch one per-project quota row or raise ProjectQuotaNotFound.
    result = model_query(context, models.Quota, session=session,
                         read_deleted="no").\
        filter_by(project_id=project_id).\
        filter_by(resource=resource).\
        first()

    if not result:
        raise exception.ProjectQuotaNotFound(project_id=project_id)

    return result


@require_context
def quota_get(context, project_id, resource):
    return _quota_get(context, project_id, resource)


@require_context
def quota_get_all_by_project(context, project_id):
    # Returns {'project_id': ..., <resource>: hard_limit, ...}.
    rows = model_query(context, models.Quota, read_deleted="no").\
        filter_by(project_id=project_id).\
        all()

    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result


@require_context
def quota_allocated_get_all_by_project(context, project_id, session=None):
    # Returns {'project_id': ..., <resource>: allocated, ...}.
    rows = model_query(context, models.Quota, read_deleted='no',
                       session=session).filter_by(project_id=project_id).all()
    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = row.allocated
    return result


@require_context
def _quota_get_all_by_resource(context, resource, session=None):
    rows = model_query(context, models.Quota, session=session,
                       read_deleted='no').filter_by(
        resource=resource).all()
    return rows


@require_context
def quota_create(context, project_id, resource, limit, allocated):
    quota_ref = models.Quota()
    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    if allocated:
        quota_ref.allocated = allocated

    session = get_session()
    with session.begin():
        quota_ref.save(session)
        return quota_ref


@require_context
def quota_update(context, project_id, resource, limit):
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource, session=session)
        quota_ref.hard_limit = limit
        return quota_ref


@require_context
def quota_update_resource(context, old_res, new_res):
    # Rename a resource on every per-project quota row (used when a quota
    # resource name changes, e.g. per-type quotas after a type rename).
    session = get_session()
    with session.begin():
        quotas = _quota_get_all_by_resource(context, old_res, session=session)
        for quota in quotas:
            quota.resource = new_res


@require_admin_context
def quota_allocated_update(context, project_id, resource, allocated):
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource, session=session)
        quota_ref.allocated = allocated
        return quota_ref


@require_admin_context
def quota_destroy(context, project_id, resource):
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource, session=session)
        return quota_ref.delete(session=session)


###################


@require_context
def _quota_class_get(context, class_name, resource, session=None):
    # Fetch one quota-class row or raise QuotaClassNotFound.
    result = model_query(context, models.QuotaClass, session=session,
                         read_deleted="no").\
        filter_by(class_name=class_name).\
        filter_by(resource=resource).\
        first()

    if not result:
        raise 
exception.QuotaClassNotFound(class_name=class_name)

    return result


@require_context
def quota_class_get(context, class_name, resource):
    return _quota_class_get(context, class_name, resource)


def quota_class_get_defaults(context):
    # Limits registered under the special 'default' quota class.
    rows = model_query(context, models.QuotaClass,
                       read_deleted="no").\
        filter_by(class_name=_DEFAULT_QUOTA_NAME).all()

    result = {'class_name': _DEFAULT_QUOTA_NAME}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result


@require_context
def quota_class_get_all_by_name(context, class_name):
    rows = model_query(context, models.QuotaClass, read_deleted="no").\
        filter_by(class_name=class_name).\
        all()

    result = {'class_name': class_name}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result


@require_context
def _quota_class_get_all_by_resource(context, resource, session):
    result = model_query(context, models.QuotaClass,
                         session=session,
                         read_deleted="no").\
        filter_by(resource=resource).\
        all()
    return result


@handle_db_data_error
@require_context
def quota_class_create(context, class_name, resource, limit):
    quota_class_ref = models.QuotaClass()
    quota_class_ref.class_name = class_name
    quota_class_ref.resource = resource
    quota_class_ref.hard_limit = limit

    session = get_session()
    with session.begin():
        quota_class_ref.save(session)
        return quota_class_ref


@require_context
def quota_class_update(context, class_name, resource, limit):
    session = get_session()
    with session.begin():
        quota_class_ref = _quota_class_get(context, class_name, resource,
                                           session=session)
        quota_class_ref.hard_limit = limit
        return quota_class_ref


@require_context
def quota_class_update_resource(context, old_res, new_res):
    # Rename a resource on every quota-class row.
    session = get_session()
    with session.begin():
        quota_class_list = _quota_class_get_all_by_resource(context, old_res,
                                                            session)
        for quota_class in quota_class_list:
            quota_class.resource = new_res


@require_context
def quota_class_destroy(context, class_name, resource):
    session = get_session()
    with session.begin():
        quota_class_ref = _quota_class_get(context, class_name, resource,
                                           session=session)
        return quota_class_ref.delete(session=session)


@require_context
def quota_class_destroy_all_by_name(context, class_name):
    session = get_session()
    with session.begin():
        quota_classes = model_query(context, models.QuotaClass,
                                    session=session, read_deleted="no").\
            filter_by(class_name=class_name).\
            all()

        for quota_class_ref in quota_classes:
            quota_class_ref.delete(session=session)


###################


@require_context
def quota_usage_get(context, project_id, resource):
    result = model_query(context, models.QuotaUsage, read_deleted="no").\
        filter_by(project_id=project_id).\
        filter_by(resource=resource).\
        first()

    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)

    return result


@require_context
def quota_usage_get_all_by_project(context, project_id):
    # Returns {'project_id': ..., <resource>: {'in_use': n, 'reserved': m}}.
    rows = model_query(context, models.QuotaUsage, read_deleted="no").\
        filter_by(project_id=project_id).\
        all()

    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = dict(in_use=row.in_use,
                                    reserved=row.reserved)

    return result


@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
                        until_refresh, session=None):
    # Create a new usage row for project/resource; committed by the
    # caller's surrounding transaction.
    quota_usage_ref = models.QuotaUsage()
    quota_usage_ref.project_id = project_id
    quota_usage_ref.resource = resource
    quota_usage_ref.in_use = in_use
    quota_usage_ref.reserved = reserved
    quota_usage_ref.until_refresh = until_refresh
    quota_usage_ref.save(session=session)

    return quota_usage_ref


###################


def _reservation_create(context, uuid, usage, project_id, resource, delta,
                        expire, session=None, allocated_id=None):
    # usage may be None for allocated (nested-quota) reservations, in which
    # case allocated_id links the reservation to the quota row instead.
    usage_id = usage['id'] if usage else None
    reservation_ref = models.Reservation()
    reservation_ref.uuid = uuid
    reservation_ref.usage_id = usage_id
    reservation_ref.project_id = project_id
    reservation_ref.resource = resource
    reservation_ref.delta = delta
    reservation_ref.expire = expire
    reservation_ref.allocated_id = allocated_id
    reservation_ref.save(session=session)

    return reservation_ref


###################


# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.

def _get_quota_usages(context, session, project_id, resources=None):
    # Broken out for testability
    # Row-locks (SELECT ... FOR UPDATE) the project's usage rows, ordered by
    # id so concurrent reservers always lock in the same order.
    query = model_query(context, models.QuotaUsage,
                        read_deleted="no",
                        session=session).filter_by(project_id=project_id)
    if resources:
        query = query.filter(models.QuotaUsage.resource.in_(list(resources)))
    rows = query.order_by(models.QuotaUsage.id.asc()).\
        with_for_update().all()
    return {row.resource: row for row in rows}


def _get_quota_usages_by_resource(context, session, resource):
    # NOTE(review): 'deleted="no"' is not a keyword that model_query()
    # recognizes (it only reads read_deleted), so it is silently ignored
    # and the context's read_deleted setting applies instead -- presumably
    # this was meant to be read_deleted="no"; verify before relying on it.
    rows = model_query(context, models.QuotaUsage,
                       deleted="no",
                       session=session).\
        filter_by(resource=resource).\
        order_by(models.QuotaUsage.id.asc()).\
        with_for_update().\
        all()
    return rows


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_usage_update_resource(context, old_res, new_res):
    # Rename a resource on every usage row; until_refresh=1 forces a
    # usage re-sync on the next reservation touching the new resource.
    session = get_session()
    with session.begin():
        usages = _get_quota_usages_by_resource(context, session, old_res)
        for usage in usages:
            usage.resource = new_res
            usage.until_refresh = 1


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None,
                  is_allocated_reserve=False):
    elevated = context.elevated()
    session = get_session()
    with session.begin():
        if project_id is None:
            project_id = context.project_id

        # Get the current usages
        usages = _get_quota_usages(context, session, project_id,
                                   resources=deltas.keys())
        allocated = quota_allocated_get_all_by_project(context, project_id,
                                                       session=session)
        allocated.pop('project_id')

        # Handle usage refresh
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
refresh = False if resource not in usages: usages[resource] = _quota_usage_create(elevated, project_id, resource, 0, 0, until_refresh or None, session=session) refresh = True elif usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... refresh = True elif usages[resource].until_refresh is not None: usages[resource].until_refresh -= 1 if usages[resource].until_refresh <= 0: refresh = True elif max_age and usages[resource].updated_at is not None and ( (timeutils.utcnow() - usages[resource].updated_at).total_seconds() >= max_age): refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] volume_type_id = getattr(resources[resource], 'volume_type_id', None) volume_type_name = getattr(resources[resource], 'volume_type_name', None) updates = sync(elevated, project_id, volume_type_id=volume_type_id, volume_type_name=volume_type_name, session=session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if res not in usages: usages[res] = _quota_usage_create( elevated, project_id, res, 0, 0, until_refresh or None, session=session ) # Update the usage usages[res].in_use = in_use usages[res].until_refresh = until_refresh or None # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. 
# Check for deltas that would go negative if is_allocated_reserve: unders = [r for r, delta in deltas.items() if delta < 0 and delta + allocated.get(r, 0) < 0] else: unders = [r for r, delta in deltas.items() if delta < 0 and delta + usages[r].in_use < 0] # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. overs = [r for r, delta in deltas.items() if quotas[r] >= 0 and delta >= 0 and quotas[r] < delta + usages[r].total + allocated.get(r, 0)] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for resource, delta in deltas.items(): usage = usages[resource] allocated_id = None if is_allocated_reserve: try: quota = _quota_get(context, project_id, resource, session=session) except exception.ProjectQuotaNotFound: # If we were using the default quota, create DB entry quota = quota_create(context, project_id, resource, quotas[resource], 0) # Since there's no reserved/total for allocated, update # allocated immediately and subtract on rollback if needed quota_allocated_update(context, project_id, resource, quota.allocated + delta) allocated_id = quota.id usage = None reservation = _reservation_create( elevated, str(uuid.uuid4()), usage, project_id, resource, delta, expire, session=session, allocated_id=allocated_id) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. 
# 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. if delta > 0 and not is_allocated_reserve: usages[resource].reserved += delta if unders: LOG.warning("Change will make usage less than 0 for the following " "resources: %s", unders) if overs: usages = {k: dict(in_use=v.in_use, reserved=v.reserved, allocated=allocated.get(k, 0)) for k, v in usages.items()} raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages=usages) return reservations def _quota_reservations(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_for_update().\ all() def _get_reservation_resources(session, context, reservation_ids): """Return the relevant resources by reservations.""" reservations = model_query(context, models.Reservation, read_deleted="no", session=session).\ options(load_only('resource')).\ filter(models.Reservation.uuid.in_(reservation_ids)).\ all() return {r.resource for r in reservations} def _dict_with_usage_id(usages): return {row.id: row for row in usages.values()} @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_commit(context, reservations, project_id=None): session = get_session() with session.begin(): usages = _get_quota_usages( context, session, project_id, resources=_get_reservation_resources(session, context, reservations)) usages = _dict_with_usage_id(usages) for reservation in _quota_reservations(session, context, reservations): # Allocated reservations will have already been bumped if not reservation.allocated_id: usage = usages[reservation.usage_id] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta 
            reservation.delete(session=session)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_rollback(context, reservations, project_id=None):
    """Undo reservations: release reserved/allocated counts, then delete."""
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(
            context, session, project_id,
            resources=_get_reservation_resources(session, context,
                                                 reservations))
        usages = _dict_with_usage_id(usages)
        for reservation in _quota_reservations(session, context,
                                               reservations):
            if reservation.allocated_id:
                # Allocated quota was bumped at reserve time; subtract back.
                reservation.quota.allocated -= reservation.delta
            else:
                usage = usages[reservation.usage_id]
                if reservation.delta >= 0:
                    usage.reserved -= reservation.delta

            reservation.delete(session=session)


def quota_destroy_by_project(*args, **kwargs):
    """Destroy all limit quotas associated with a project.

    Leaves usage and reservation quotas intact.
    """
    quota_destroy_all_by_project(only_quotas=True, *args, **kwargs)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_destroy_all_by_project(context, project_id, only_quotas=False):
    """Destroy all quotas associated with a project.

    This includes limit quotas, usage quotas and reservation quotas.
    Optionally can only remove limit quotas and leave other types as they are.

    :param context: The request context, for access checks.
    :param project_id: The ID of the project being deleted.
    :param only_quotas: Only delete limit quotas, leave other types intact.
    """
    session = get_session()
    with session.begin():
        quotas = model_query(context, models.Quota, session=session,
                             read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_ref in quotas:
            quota_ref.delete(session=session)

        if only_quotas:
            return

        quota_usages = model_query(context, models.QuotaUsage,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_usage_ref in quota_usages:
            quota_usage_ref.delete(session=session)

        reservations = model_query(context, models.Reservation,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for reservation_ref in reservations:
            reservation_ref.delete(session=session)


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_expire(context):
    """Release and delete every reservation whose expire time has passed."""
    session = get_session()
    with session.begin():
        current_time = timeutils.utcnow()
        results = model_query(context, models.Reservation, session=session,
                              read_deleted="no").\
            filter(models.Reservation.expire < current_time).\
            all()

        if results:
            for reservation in results:
                if reservation.delta >= 0:
                    if reservation.allocated_id:
                        reservation.quota.allocated -= reservation.delta
                        reservation.quota.save(session=session)
                    else:
                        reservation.usage.reserved -= reservation.delta
                        reservation.usage.save(session=session)

                reservation.delete(session=session)


###################


@require_admin_context
def volume_attach(context, values):
    """Create a VolumeAttachment row (generating an id when absent)."""
    volume_attachment_ref = models.VolumeAttachment()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    volume_attachment_ref.update(values)
    session = get_session()
    with session.begin():
        volume_attachment_ref.save(session=session)
        return _attachment_get(context, values['id'],
                               session=session)


@require_admin_context
def volume_attached(context, attachment_id, instance_uuid, host_name,
                    mountpoint, attach_mode, mark_attached):
    """This method updates a volume attachment entry.

    This function saves the information related to a particular
    attachment for a volume.  It also updates the volume record
    to mark the volume as attached or attaching.

    The mark_attached argument is a boolean, when set to True,
    we mark the volume as 'in-use' and the 'attachment' as
    'attached', if False, we use 'attaching' for both of these
    status settings.
    """
    attach_status = fields.VolumeAttachStatus.ATTACHED
    volume_status = 'in-use'
    if not mark_attached:
        attach_status = fields.VolumeAttachStatus.ATTACHING
        volume_status = 'attaching'

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        volume_attachment_ref = _attachment_get(context, attachment_id,
                                                session=session)

        updated_values = {'mountpoint': mountpoint,
                          'attach_status': attach_status,
                          'instance_uuid': instance_uuid,
                          'attached_host': host_name,
                          'attach_time': timeutils.utcnow(),
                          'attach_mode': attach_mode,
                          # literal_column keeps updated_at untouched by the
                          # ORM's automatic timestamping for this update.
                          'updated_at': literal_column('updated_at')}
        volume_attachment_ref.update(updated_values)
        volume_attachment_ref.save(session=session)
        del updated_values['updated_at']

        volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
                                 session=session)
        volume_ref['status'] = volume_status
        volume_ref['attach_status'] = attach_status
        volume_ref.save(session=session)
        return (volume_ref, updated_values)


@handle_db_data_error
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_create(context, values):
    """Create a volume row plus its (admin) metadata rows."""
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    # Admin metadata may only be written under an admin context.
    if is_admin_context(context):
        values['volume_admin_metadata'] = \
            _metadata_refs(values.get('admin_metadata'),
                           models.VolumeAdminMetadata)
    elif values.get('volume_admin_metadata'):
        del values['volume_admin_metadata']

    volume_ref = models.Volume()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(volume_ref)

    return _volume_get(context, values['id'], session=session)


def get_booleans_for_table(table_name):
    """Return the names of the Boolean columns of the named model table."""
    booleans = set()
    table = getattr(models, table_name.capitalize())
    if hasattr(table, '__table__'):
        columns = table.__table__.columns
        for column in columns:
            if isinstance(column.type, sqltypes.Boolean):
                booleans.add(column.name)

    return booleans


@require_admin_context
def volume_data_get_for_host(context, host, count_only=False):
    """Return count (or count and total size) of volumes on a host.

    Matches both bare 'host' and pool-qualified 'host#pool' records.
    """
    host_attr = models.Volume.host
    conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')]
    if count_only:
        result = model_query(context,
                             func.count(models.Volume.id),
                             read_deleted="no").filter(
            or_(*conditions)).first()
        return result[0] or 0
    else:
        result = model_query(context,
                             func.count(models.Volume.id),
                             func.sum(models.Volume.size),
                             read_deleted="no").filter(
            or_(*conditions)).first()
        # NOTE(vish): convert None to 0
        return (result[0] or 0, result[1] or 0)


@require_admin_context
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
                                 session=None, host=None):
    # (count, total_size) of a project's volumes, optionally narrowed by
    # volume type and/or host.
    query = model_query(context,
                        func.count(models.Volume.id),
                        func.sum(models.Volume.size),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)
    if host:
        query = query.filter(_filter_host(models.Volume.host, host))
    if volume_type_id:
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)


@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None,
                                 session=None):
    # (count, total_size) of a project's backups, optionally by volume type.
    query = model_query(context,
                        func.count(models.Backup.id),
                        func.sum(models.Backup.size),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)
    if volume_type_id:
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)


@require_admin_context
def volume_data_get_for_project(context, project_id,
                                volume_type_id=None, host=None):
    """Public wrapper for _volume_data_get_for_project."""
    return _volume_data_get_for_project(context, project_id, volume_type_id,
                                        host=host)
# Models whose rows must be soft-deleted together with their parent volume.
VOLUME_DEPENDENT_MODELS = frozenset([models.VolumeMetadata,
                                     models.VolumeAdminMetadata,
                                     models.Transfer,
                                     models.VolumeGlanceMetadata,
                                     models.VolumeAttachment])


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_destroy(context, volume_id):
    """Soft-delete a volume and all of its dependent rows.

    :returns: the dict of values applied to the volume row.
    """
    session = get_session()
    now = timeutils.utcnow()
    updated_values = {'status': 'deleted',
                      'deleted': True,
                      'deleted_at': now,
                      # literal_column keeps updated_at untouched by the
                      # ORM's automatic timestamping for this update.
                      'updated_at': literal_column('updated_at'),
                      'migration_status': None}
    with session.begin():
        model_query(context, models.Volume, session=session).\
            filter_by(id=volume_id).\
            update(updated_values)
        for model in VOLUME_DEPENDENT_MODELS:
            model_query(context, model, session=session).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': now,
                        'updated_at': literal_column('updated_at')})
    del updated_values['updated_at']
    return updated_values


def _include_in_cluster(context, cluster, model, partial_rename, filters):
    """Generic include in cluster method.

    When we include resources in a cluster we have to be careful to preserve
    the addressing sections that have not been provided.  That's why we allow
    partial_renaming, so we can preserve the backend and pool if we are only
    providing host/cluster level information, and preserve pool information
    if we only provide backend level information.

    For example when we include a host in a cluster we receive calls with
    filters like {'host': 'localhost@lvmdriver-1'} and cluster with something
    like 'mycluster@lvmdriver-1'.  Since in the DB the resources will have
    the host field set to something like 'localhost@lvmdriver-1#lvmdriver-1'
    we want to include original pool in the new cluster_name.  So we want to
    store in cluster_name value 'mycluster@lvmdriver-1#lvmdriver-1'.
    """
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(model, filters):
        return None

    query = get_session().query(model)
    if hasattr(model, 'deleted'):
        query = query.filter_by(deleted=False)

    # cluster_name and host are special filter cases
    for field in {'cluster_name', 'host'}.intersection(filters):
        value = filters.pop(field)
        # We do a special backend filter
        query = query.filter(_filter_host(getattr(model, field), value))
        # If we want to do a partial rename and we haven't set the cluster
        # already, the value we want to set is a SQL replace of existing field
        # value.
        if partial_rename and isinstance(cluster, six.string_types):
            cluster = func.replace(getattr(model, field), value, cluster)

    query = query.filter_by(**filters)
    result = query.update({'cluster_name': cluster},
                          synchronize_session=False)
    return result


@require_admin_context
def volume_include_in_cluster(context, cluster, partial_rename=True,
                              **filters):
    """Include all volumes matching the filters into a cluster."""
    return _include_in_cluster(context, cluster, models.Volume,
                               partial_rename, filters)


@require_admin_context
def volume_detached(context, volume_id, attachment_id):
    """This updates a volume attachment and marks it as detached.

    This method also ensures that the volume entry is correctly
    marked as either still attached/in-use or detached/available
    if this was the last detachment made.

    :returns: (volume_updates, attachment_updates) dicts; the latter is
              None when the attachment row no longer exists.
    """
    # NOTE(jdg): This is a funky band-aid for the earlier attempts at
    # multiattach, it's a bummer because these things aren't really being used
    # but at the same time we don't want to break them until we work out the
    # new proposal for multi-attach
    remain_attachment = True
    session = get_session()
    with session.begin():
        try:
            attachment = _attachment_get(context, attachment_id,
                                         session=session)
        except exception.VolumeAttachmentNotFound:
            attachment_updates = None
            attachment = None

        if attachment:
            now = timeutils.utcnow()
            attachment_updates = {
                'attach_status': fields.VolumeAttachStatus.DETACHED,
                'detach_time': now,
                'deleted': True,
                'deleted_at': now,
                'updated_at': literal_column('updated_at'),
            }
            attachment.update(attachment_updates)
            attachment.save(session=session)
            del attachment_updates['updated_at']

        attachment_list = None
        volume_ref = _volume_get(context, volume_id,
                                 session=session)
        volume_updates = {'updated_at': literal_column('updated_at')}
        if not volume_ref.volume_attachment:
            # NOTE(jdg): We kept the old arg style allowing session exclusively
            # for this one call
            attachment_list = volume_attachment_get_all_by_volume_id(
                context, volume_id, session=session)
            remain_attachment = False
        if attachment_list and len(attachment_list) > 0:
            remain_attachment = True

        if not remain_attachment:
            # Hide status update from user if we're performing volume migration
            # or uploading it to image
            if ((not volume_ref.migration_status and
                    not (volume_ref.status == 'uploading')) or
                    volume_ref.migration_status in ('success', 'error')):
                volume_updates['status'] = 'available'

            volume_updates['attach_status'] = (
                fields.VolumeAttachStatus.DETACHED)
        else:
            # Volume is still attached
            volume_updates['status'] = 'in-use'
            volume_updates['attach_status'] = (
                fields.VolumeAttachStatus.ATTACHED)

        volume_ref.update(volume_updates)
        volume_ref.save(session=session)
        del volume_updates['updated_at']
        return (volume_updates, attachment_updates)


def _process_model_like_filter(model, query, filters):
    """Applies regex expression filtering to a query.

    :param model: model to apply filters to
    :param query: query to apply filters to
    :param filters: dictionary of filters with regex values
    :returns: the updated query.
    """
    if query is None:
        return query

    for key in sorted(filters):
        column_attr = getattr(model, key)
        # Skip hybrid/computed attributes -- only real columns can take LIKE.
        if 'property' == type(column_attr).__name__:
            continue
        value = filters[key]
        if not (isinstance(value, (six.string_types, int))):
            continue
        query = query.filter(
            column_attr.op('LIKE')(u'%%%s%%' % value))
    return query


def apply_like_filters(model):
    """Decorator factory: split 'key~' filters out as LIKE (regex) filters."""
    def decorator_filters(process_exact_filters):
        def _decorator(query, filters):
            exact_filters = filters.copy()
            regex_filters = {}
            for key, value in filters.items():
                # NOTE(tommylikehu): For inexact match, the filter keys
                # are in the format of 'key~=value'
                if key.endswith('~'):
                    exact_filters.pop(key)
                    regex_filters[key.rstrip('~')] = value
            query = process_exact_filters(query, exact_filters)
            return _process_model_like_filter(model, query,
                                              regex_filters)
        return _decorator
    return decorator_filters


@require_context
def _volume_get_query(context, session=None, project_only=False,
                      joined_load=True):
    """Get the query to retrieve the volume.

    :param context: the context used to run the method _volume_get_query
    :param session: the session to use
    :param project_only: the boolean used to decide whether to query the
                         volume in the current project or all projects
    :param joined_load: the boolean used to decide whether the query loads
                        the other models, which join the volume model in
                        the database.
                        Currently, the False value for this parameter
                        is specially for the case of updating database
                        during volume migration
    :returns: updated query or None
    """
    if not joined_load:
        return model_query(context, models.Volume, session=session,
                           project_only=project_only)
    if is_admin_context(context):
        # Admins additionally see volume_admin_metadata.
        return model_query(context, models.Volume, session=session,
                           project_only=project_only).\
            options(joinedload('volume_metadata')).\
            options(joinedload('volume_admin_metadata')).\
            options(joinedload('volume_type')).\
            options(joinedload('volume_attachment')).\
            options(joinedload('consistencygroup')).\
            options(joinedload('group'))
    else:
        return model_query(context, models.Volume, session=session,
                           project_only=project_only).\
            options(joinedload('volume_metadata')).\
            options(joinedload('volume_type')).\
            options(joinedload('volume_attachment')).\
            options(joinedload('consistencygroup')).\
            options(joinedload('group'))


@require_context
def _volume_get(context, volume_id, session=None, joined_load=True):
    """Fetch one volume by id (project-scoped) or raise VolumeNotFound."""
    result = _volume_get_query(context, session=session, project_only=True,
                               joined_load=joined_load)
    if joined_load:
        result = result.options(joinedload('volume_type.extra_specs'))
    result = result.filter_by(id=volume_id).first()

    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return result


def _attachment_get_all(context, filters=None, marker=None, limit=None,
                        offset=None, sort_keys=None, sort_dirs=None):
    # Shared implementation for the paginated attachment listings below.
    if filters and not is_valid_model_filters(models.VolumeAttachment,
                                              filters,
                                              exclude_list=['project_id']):
        return []

    session = get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, models.VolumeAttachment)
        if query is None:
            return []
        return query.all()


def _attachment_get(context, attachment_id, session=None, read_deleted=False,
                    project_only=True):
    # Fetch one attachment (volume eagerly joined) or raise
    # VolumeAttachmentNotFound.
    result = (model_query(context, models.VolumeAttachment, session=session,
                          read_deleted=read_deleted)
              .filter_by(id=attachment_id)
              .options(joinedload('volume'))
              .first())

    if not result:
        raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' %
                                                 attachment_id)

    return result


def _attachment_get_query(context, session=None, project_only=False):
    # Base attachment query with the volume relationship eagerly joined.
    return model_query(context, models.VolumeAttachment, session=session,
                       project_only=project_only).options(
        joinedload('volume'))


@apply_like_filters(model=models.VolumeAttachment)
def _process_attachment_filters(query, filters):
    # Apply exact-match filters; 'project_id' is resolved through the
    # joined Volume row since attachments don't carry it directly.
    if filters:
        project_id = filters.pop('project_id', None)
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.VolumeAttachment, filters):
            return
        if project_id:
            volume = models.Volume
            query = query.filter(volume.id ==
                                 models.VolumeAttachment.volume_id,
                                 volume.project_id == project_id)

        query = query.filter_by(**filters)
    return query


@require_admin_context
def volume_attachment_get_all(context, filters=None, marker=None, limit=None,
                              offset=None, sort_keys=None, sort_dirs=None):
    """Retrieve all Attachment records with filter and pagination options."""
    return _attachment_get_all(context, filters, marker, limit, offset,
                               sort_keys, sort_dirs)


@require_context
def volume_attachment_get_all_by_volume_id(context, volume_id, session=None):
    """Return the non-detached attachments of one volume."""
    result = model_query(context, models.VolumeAttachment,
                         session=session).\
        filter_by(volume_id=volume_id).\
        filter(models.VolumeAttachment.attach_status !=
               fields.VolumeAttachStatus.DETACHED). \
        options(joinedload('volume')).\
        all()
    return result


@require_context
def volume_attachment_get_all_by_host(context, host):
    """Return the non-detached attachments on one host."""
    session = get_session()
    with session.begin():
        result = model_query(context, models.VolumeAttachment,
                             session=session).\
            filter_by(attached_host=host).\
            filter(models.VolumeAttachment.attach_status !=
                   fields.VolumeAttachStatus.DETACHED). \
            options(joinedload('volume')).\
            all()
        return result


@require_context
def volume_attachment_get(context, attachment_id):
    """Fetch the specified attachment record."""
    return _attachment_get(context, attachment_id)


@require_context
def volume_attachment_get_all_by_instance_uuid(context,
                                               instance_uuid):
    """Fetch all attachment records associated with the specified instance."""
    session = get_session()
    with session.begin():
        result = model_query(context, models.VolumeAttachment,
                             session=session).\
            filter_by(instance_uuid=instance_uuid).\
            filter(models.VolumeAttachment.attach_status !=
                   fields.VolumeAttachStatus.DETACHED).\
            options(joinedload('volume')).\
            all()
        return result


@require_context
def volume_attachment_get_all_by_project(context, project_id, filters=None,
                                         marker=None, limit=None, offset=None,
                                         sort_keys=None, sort_dirs=None):
    """Retrieve all Attachment records for specific project."""
    authorize_project_context(context, project_id)
    if not filters:
        filters = {}
    else:
        filters = filters.copy()

    filters['project_id'] = project_id

    return _attachment_get_all(context, filters, marker, limit, offset,
                               sort_keys, sort_dirs)


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def attachment_destroy(context, attachment_id):
    """Destroy the specified attachment record."""
    utcnow = timeutils.utcnow()
    session = get_session()
    with session.begin():
        updated_values = {'attach_status':
                          fields.VolumeAttachStatus.DELETED,
                          'deleted': True,
                          'deleted_at': utcnow,
                          'updated_at': literal_column('updated_at')}
        model_query(context, models.VolumeAttachment, session=session).\
            filter_by(id=attachment_id).\
            update(updated_values)
        model_query(context, models.AttachmentSpecs, session=session).\
            filter_by(attachment_id=attachment_id).\
            update({'deleted': True,
                    'deleted_at': utcnow,
                    'updated_at': literal_column('updated_at')})
    del updated_values['updated_at']
    return updated_values


def attachment_specs_exist(context):
    """Return True when any live AttachmentSpecs row exists."""
    query = model_query(context, models.AttachmentSpecs, read_deleted='no')
    return bool(query.first())


def _attachment_specs_query(context, attachment_id, session=None):
    # Base query for the specs of a single attachment.
    return model_query(context, models.AttachmentSpecs, session=session,
                       read_deleted="no").\
        filter_by(attachment_id=attachment_id)


@require_context
def attachment_specs_get(context, attachment_id):
    """DEPRECATED: Fetch the attachment_specs for the specified attachment."""
    rows = _attachment_specs_query(context, attachment_id).\
        all()

    result = {row['key']: row['value'] for row in rows}
    return result


@require_context
def attachment_specs_delete(context, attachment_id, key):
    """DEPRECATED: Delete attachment_specs for the specified attachment."""
    session = get_session()
    with session.begin():
        # Raises AttachmentSpecsNotFound if the key doesn't exist.
        _attachment_specs_get_item(context, attachment_id, key, session)
        _attachment_specs_query(context, attachment_id, session).\
            filter_by(key=key).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def _attachment_specs_get_item(context, attachment_id, key, session=None):
    # Fetch one spec row by key or raise AttachmentSpecsNotFound.
    result = _attachment_specs_query(
        context, attachment_id, session=session).\
        filter_by(key=key).\
        first()

    if not result:
        raise exception.AttachmentSpecsNotFound(
            specs_key=key,
            attachment_id=attachment_id)

    return result


@handle_db_data_error
@require_context
def attachment_specs_update_or_create(context, attachment_id, specs):
    """DEPRECATED: Update attachment_specs for the specified attachment."""
    session = get_session()
    with session.begin():
        spec_ref = None
        for key, value in specs.items():
            try:
                spec_ref = _attachment_specs_get_item(
                    context, attachment_id, key, session)
            except exception.AttachmentSpecsNotFound:
                spec_ref = models.AttachmentSpecs()
            spec_ref.update({"key": key, "value": value,
                             "attachment_id": attachment_id,
                             "deleted": False})
            spec_ref.save(session=session)

        return specs


@require_context
def volume_get(context, volume_id):
    """Public wrapper for _volume_get."""
    return _volume_get(context, volume_id)


@require_admin_context
def volume_get_all(context, marker=None, limit=None, sort_keys=None,
                   sort_dirs=None, filters=None, offset=None):
    """Retrieves all volumes.

    If no sort parameters are specified then the returned volumes are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No volumes would match, return empty list
        if query is None:
            return []
        return query.all()


@require_context
def get_volume_summary(context, project_only, filters=None):
    """Retrieves all volumes summary.

    :param context: context to query under
    :param project_only: limit summary to project volumes
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: volume summary
    """
    if not (project_only or is_admin_context(context)):
        raise exception.AdminRequired()
    query = model_query(context, func.count(models.Volume.id),
                        func.sum(models.Volume.size), read_deleted="no")
    if project_only:
        query = query.filter_by(project_id=context.project_id)

    if filters:
        query = _process_volume_filters(query, filters)

    if query is None:
        return []

    result = query.first()

    query_metadata = model_query(
        context, models.VolumeMetadata.key, models.VolumeMetadata.value,
        read_deleted="no")
    if project_only:
        query_metadata = query_metadata.join(
            models.Volume,
            models.Volume.id == models.VolumeMetadata.volume_id).filter_by(
            project_id=context.project_id)
    result_metadata = query_metadata.distinct().all()

    result_metadata_list = collections.defaultdict(list)
    for key, value in result_metadata:
        result_metadata_list[key].append(value)

    return (result[0] or 0, result[1] or 0, result_metadata_list)


@require_admin_context
def volume_get_all_by_host(context, host, filters=None):
    """Retrieves all volumes hosted on a host.

    :param context: context to query under
    :param host: host for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    # As a side effect of the introduction of pool-aware scheduler,
    # newly created volumes will have pool information appended to
    # 'host' field of a volume record. So a volume record in DB can
    # now be either form below:
    #     Host
    #     Host#Pool
    if host and isinstance(host, six.string_types):
        session = get_session()
        with session.begin():
            host_attr = getattr(models.Volume, 'host')
            conditions = [host_attr == host,
                          host_attr.op('LIKE')(host + '#%')]
            query = _volume_get_query(context).filter(or_(*conditions))
            if filters:
                query = _process_volume_filters(query, filters)
                # No volumes would match, return empty list
                if query is None:
                    return []
            return query.all()
    elif not host:
        return []


@require_context
def volume_get_all_by_group(context, group_id, filters=None):
    """Retrieves all volumes associated with the group_id.

    :param context: context to query under
    :param group_id: consistency group ID for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    query = _volume_get_query(context).filter_by(consistencygroup_id=group_id)
    if filters:
        query = _process_volume_filters(query, filters)
        # No volumes would match, return empty list
        if query is None:
            return []
    return query.all()


@require_context
def volume_get_all_by_generic_group(context, group_id, filters=None):
    """Retrieves all volumes associated with the group_id.

    :param context: context to query under
    :param group_id: group ID for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    query = _volume_get_query(context).filter_by(group_id=group_id)
    if filters:
        query = _process_volume_filters(query, filters)
        # No volumes would match, return empty list
        if query is None:
            return []
    return query.all()


@require_context
def volume_get_all_by_project(context, project_id, marker, limit,
                              sort_keys=None, sort_dirs=None, filters=None,
                              offset=None):
    """Retrieves all volumes in a project.

    If no sort parameters are specified then the returned volumes are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param project_id: project for all volumes being retrieved
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_volume_filters
                    function for more information
    :returns: list of matching volumes
    """
    session = get_session()
    with session.begin():
        authorize_project_context(context, project_id)
        # Add in the project filter without modifying the given filters
        filters = filters.copy() if filters else {}
        filters['project_id'] = project_id
        # Generate the query
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset)
        # No volumes
def _generate_paginate_query(context, session, marker, limit, sort_keys,
                             sort_dirs, filters, offset=None,
                             paginate_type=models.Volume):
    """Build a filtered, sorted, paginated query for *paginate_type*.

    Returns a query with sorting / pagination criteria added, or None if
    the given filters cannot match any row.

    :param context: context to query under
    :param session: the session to use
    :param marker: the last item of the previous page; results after this
                   value are returned
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; list/tuple/set values produce an
                    'IN' clause, other values are matched exactly
    :param offset: number of items to skip
    :param paginate_type: model class to paginate over
    :returns: updated query or None
    """
    # Each supported model registers its query/filter/getter helpers.
    get_query, process_filters, get = PAGINATION_HELPERS[paginate_type]

    sort_keys, sort_dirs = process_sort_params(sort_keys, sort_dirs,
                                               default_dir='desc')
    query = get_query(context, session=session)

    if filters:
        query = process_filters(query, filters)
        if query is None:
            # Filters are unsatisfiable; signal "no results" to the caller.
            return None

    # Resolve the marker id into a model object for paginate_query.
    marker_object = None if marker is None else get(context, marker, session)

    return sqlalchemyutils.paginate_query(query, paginate_type, limit,
                                          sort_keys, marker=marker_object,
                                          sort_dirs=sort_dirs, offset=offset)
get_query, process_filters = CALCULATE_COUNT_HELPERS[resource_type] query = get_query(context, session=session) if filters: query = process_filters(query, filters) if query is None: return 0 return query.with_entities(func.count()).scalar() @apply_like_filters(model=models.Volume) def _process_volume_filters(query, filters): """Common filter processing for Volume queries. Filter values that are in lists, tuples, or sets cause an 'IN' operator to be used, while exact matching ('==' operator) is used for other values. A filter key/value of 'no_migration_targets'=True causes volumes with either a NULL 'migration_status' or a 'migration_status' that does not start with 'target:' to be retrieved. A 'metadata' filter key must correspond to a dictionary value of metadata key-value pairs. :param query: Model query to use :param filters: dictionary of filters :returns: updated query or None """ filters = filters.copy() # 'no_migration_targets' is unique, must be either NULL or # not start with 'target:' if filters.get('no_migration_targets', False): filters.pop('no_migration_targets') try: column_attr = getattr(models.Volume, 'migration_status') conditions = [column_attr == None, # noqa column_attr.op('NOT LIKE')('target:%')] query = query.filter(or_(*conditions)) except AttributeError: LOG.debug("'migration_status' column could not be found.") return None host = filters.pop('host', None) if host: query = query.filter(_filter_host(models.Volume.host, host)) cluster_name = filters.pop('cluster_name', None) if cluster_name: query = query.filter(_filter_host(models.Volume.cluster_name, cluster_name)) for time_comparison_filter in ['created_at', 'updated_at']: if filters.get(time_comparison_filter, None): time_filter_dict = filters.pop(time_comparison_filter) try: time_filter_attr = getattr(models.Volume, time_comparison_filter) query = query.filter(_filter_time_comparison(time_filter_attr, time_filter_dict)) except AttributeError: LOG.debug("%s column could not be found.", 
time_comparison_filter) return None # Apply exact match filters for everything else, ensure that the # filter value exists on the model for key in filters.keys(): # metadata/glance_metadata is unique, must be a dict if key in ('metadata', 'glance_metadata'): if not isinstance(filters[key], dict): LOG.debug("'%s' filter value is not valid.", key) return None continue try: column_attr = getattr(models.Volume, key) # Do not allow relationship properties since those require # schema specific knowledge prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug(("'%s' filter key is not valid, " "it maps to a relationship."), key) return None except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None # Holds the simple exact matches filter_dict = {} # Iterate over all filters, special case the filter if necessary for key, value in filters.items(): if key == 'metadata': # model.VolumeMetadata defines the backref to Volumes as # 'volume_metadata' or 'volume_admin_metadata', use those as # column attribute keys col_attr = getattr(models.Volume, 'volume_metadata') col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') for k, v in value.items(): query = query.filter(or_(col_attr.any(key=k, value=v), col_ad_attr.any(key=k, value=v))) elif key == 'glance_metadata': # use models.Volume.volume_glance_metadata as column attribute key. 
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
                        default_dir='asc'):
    """Normalize sort keys/directions and append the default keys.

    Produces a list of sort keys and a matching list of sort directions.
    Default keys are appended (with a default direction) when not already
    present.  The direction used for appended/padded entries is the first
    element of *sort_dirs* when given, else *default_dir* ('asc' matches
    the default of sqlalchemy.utils.paginate_query).

    :param sort_keys: list of sort keys to include in the processed list
    :param sort_dirs: list of sort directions paired with sort_keys
    :param default_keys: sort keys that must appear in the result; appended
                         at the end when not already specified
    :param default_dir: direction paired with any appended default key
    :returns: list of sort keys, list of sort directions
    :raise exception.InvalidInput: if more sort directions than sort keys
                                   are specified or if an invalid sort
                                   direction is specified
    """
    if default_keys is None:
        default_keys = ['created_at', 'id']

    # Direction applied to every key that has no explicit direction.
    fill_dir = sort_dirs[0] if sort_dirs else default_dir

    # Never mutate the caller's list.
    result_keys = list(sort_keys) if sort_keys else []

    if sort_dirs:
        for sort_dir in sort_dirs:
            if sort_dir not in ('asc', 'desc'):
                msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
                raise exception.InvalidInput(reason=msg)
        result_dirs = list(sort_dirs)
    else:
        result_dirs = [fill_dir for _sort_key in result_keys]

    # Pad directions out to the number of keys; extra directions are an
    # error since they cannot be paired with any key.
    result_dirs.extend([fill_dir] * (len(result_keys) - len(result_dirs)))
    if len(result_dirs) > len(result_keys):
        msg = _("Sort direction array size exceeds sort key array size.")
        raise exception.InvalidInput(reason=msg)

    # Ensure the default keys are always part of the ordering.
    for key in default_keys:
        if key not in result_keys:
            result_keys.append(key)
            result_dirs.append(fill_dir)

    return result_keys, result_dirs
@require_context
def volume_attachment_update(context, attachment_id, values):
    """Apply *values* to a volume attachment row.

    :param context: context to query under
    :param attachment_id: id of the attachment to update
    :param values: dictionary of column updates to apply
    :raises exception.VolumeAttachmentNotFound: if no row matched
    """
    updated = (model_query(context, models.VolumeAttachment).
               filter_by(id=attachment_id).
               update(values))
    if not updated:
        # update() reports the matched-row count; zero means no such row.
        raise exception.VolumeAttachmentNotFound(
            filter='attachment_id = ' + attachment_id)
def volume_has_undeletable_snapshots_filter():
    """SQL filter: volume has at least one snapshot that blocks deletion.

    A snapshot blocks deletion when it is not deleted AND, for both the
    cgsnapshot and group-snapshot cases, it either belongs to a
    (cg/group) snapshot or is in a non-deletable status.  Only snapshots
    in 'available' or 'error' status with no cg/group membership are
    considered deletable.
    """
    deletable_statuses = ['available', 'error']
    return sql.exists().where(
        and_(models.Volume.id == models.Snapshot.volume_id,
             ~models.Snapshot.deleted,
             # Part of a cgsnapshot, or not in a deletable status.
             or_(models.Snapshot.cgsnapshot_id != None,  # noqa: != None
                 models.Snapshot.status.notin_(deletable_statuses)),
             # Part of a group snapshot, or not in a deletable status.
             or_(models.Snapshot.group_snapshot_id != None,  # noqa: != None
                 models.Snapshot.status.notin_(deletable_statuses))))
""" # Query to get the qos of the volume type new_vol_type q = sql.select([models.VolumeType.qos_specs_id]).where(and_( ~models.VolumeType.deleted, models.VolumeType.id == new_vol_type)) # Construct the filter to check qos when volume is 'in-use' return or_( # If volume is available models.Volume.status == 'available', # Or both volume types have the same qos specs sql.exists().where(and_( ~models.VolumeType.deleted, models.VolumeType.id == models.Volume.volume_type_id, models.VolumeType.qos_specs_id == q.as_scalar())), # Or they are different specs but they are handled by the backend or # it is not specified. The way SQL evaluatels value != 'back-end' # makes it result in False not only for 'back-end' values but for # NULL as well, and with the double negation we ensure that we only # allow QoS with 'consumer' values of 'back-end' and NULL. and_( ~sql.exists().where(and_( ~models.VolumeType.deleted, models.VolumeType.id == models.Volume.volume_type_id, (models.VolumeType.qos_specs_id == models.QualityOfServiceSpecs.specs_id), models.QualityOfServiceSpecs.key == 'consumer', models.QualityOfServiceSpecs.value != 'back-end')), ~sql.exists().where(and_( ~models.VolumeType.deleted, models.VolumeType.id == new_vol_type, (models.VolumeType.qos_specs_id == models.QualityOfServiceSpecs.specs_id), models.QualityOfServiceSpecs.key == 'consumer', models.QualityOfServiceSpecs.value != 'back-end')))) def volume_has_other_project_snp_filter(): return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, models.Volume.project_id != models.Snapshot.project_id)) #################### def _volume_x_metadata_get_query(context, volume_id, model, session=None): return model_query(context, model, session=session, read_deleted="no").\ filter_by(volume_id=volume_id) def _volume_x_metadata_get(context, volume_id, model, session=None): rows = _volume_x_metadata_get_query(context, volume_id, model, session=session).all() result = {} for row in rows: result[row['key']] = 
row['value'] return result def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, session=None): result = _volume_x_metadata_get_query(context, volume_id, model, session=session).\ filter_by(key=key).\ first() if not result: if model is models.VolumeGlanceMetadata: raise notfound_exec(id=volume_id) else: raise notfound_exec(metadata_key=key, volume_id=volume_id) return result def _volume_x_metadata_update(context, volume_id, metadata, delete, model, session=None, add=True, update=True): session = session or get_session() metadata = metadata.copy() with session.begin(subtransactions=True): # Set existing metadata to deleted if delete argument is True. This is # committed immediately to the DB if delete: expected_values = {'volume_id': volume_id} # We don't want to delete keys we are going to update if metadata: expected_values['key'] = db.Not(metadata.keys()) conditional_update(context, model, {'deleted': True, 'deleted_at': timeutils.utcnow()}, expected_values) # Get existing metadata db_meta = _volume_x_metadata_get_query(context, volume_id, model).all() save = [] skip = [] # We only want to send changed metadata. 
@require_context
def _volume_glance_metadata_key_to_id(context, volume_id, key):
    """Return the row id of the Glance metadata entry matching *key*.

    :param context: context to query under
    :param volume_id: volume whose Glance (image) metadata is searched
    :param key: metadata key to look up
    :returns: primary key (id) of the matching VolumeGlanceMetadata row
    :raises KeyError: if the volume has no entry for *key*
    """
    db_data = volume_glance_metadata_get(context, volume_id)
    # Scan for the key directly instead of materializing an intermediate
    # filtered dict only to index it (keys are unique per volume, so the
    # first hit is the answer).
    for meta_entry in db_data:
        if meta_entry.key == key:
            return meta_entry.id
    # Preserve the original behaviour: a missing key surfaced as KeyError
    # from the dict lookup, so keep raising KeyError for callers.
    raise KeyError(key)
_volume_admin_metadata_update(context, volume_id, metadata, delete, session=None, add=True, update=True): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeAdminMetadata, session=session, add=add, update=update) @require_admin_context def volume_admin_metadata_get(context, volume_id): return _volume_admin_metadata_get(context, volume_id) @require_admin_context @require_volume_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_admin_metadata_delete(context, volume_id, key): _volume_admin_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_admin_metadata_update(context, volume_id, metadata, delete, add=True, update=True): return _volume_admin_metadata_update(context, volume_id, metadata, delete, add=add, update=update) ################### @require_context @handle_db_data_error def snapshot_create(context, values): values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), models.SnapshotMetadata) if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session.add(snapshot_ref) return _snapshot_get(context, values['id'], session=session) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def snapshot_destroy(context, snapshot_id): utcnow = timeutils.utcnow() session = get_session() with session.begin(): updated_values = {'status': 'deleted', 'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')} model_query(context, models.Snapshot, session=session).\ filter_by(id=snapshot_id).\ update(updated_values) model_query(context, models.SnapshotMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ 
update({'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) del updated_values['updated_at'] return updated_values @require_context def _snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_context def snapshot_get(context, snapshot_id): return _snapshot_get(context, snapshot_id) @require_admin_context def snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Retrieves all snapshots. If no sorting parameters are specified then returned snapshots are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param filters: dictionary of filters; will do exact matching on values. Special keys host and cluster_name refer to the volume. 
:param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :returns: list of matching snapshots """ if filters and not is_valid_model_filters( models.Snapshot, filters, exclude_list=('host', 'cluster_name', 'availability_zone')): return [] session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot) # No snapshots would match, return empty list if not query: return [] return query.all() def _snaps_get_query(context, session=None, project_only=False): return model_query(context, models.Snapshot, session=session, project_only=project_only).\ options(joinedload('snapshot_metadata')) @apply_like_filters(model=models.Snapshot) def _process_snaps_filters(query, filters): if filters: filters = filters.copy() exclude_list = ('host', 'cluster_name', 'availability_zone') # Ensure that filters' keys exist on the model or is metadata for key in filters.keys(): # Ensure if filtering based on metadata filter is queried # then the filters value is a dictionary if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("Metadata filter value is not valid dictionary") return None continue if key in exclude_list: continue # for keys in filter other than metadata and exclude_list # ensure that the keys are in Snapshot modelt try: column_attr = getattr(models.Snapshot, key) prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug( "'%s' key is not valid, it maps to a relationship.", key) return None except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None # filter handling for host and 
cluster name host = filters.pop('host', None) cluster = filters.pop('cluster_name', None) az = filters.pop('availability_zone', None) if host or cluster or az: query = query.join(models.Snapshot.volume) vol_field = models.Volume if host: query = query.filter(_filter_host(vol_field.host, host)) if cluster: query = query.filter(_filter_host(vol_field.cluster_name, cluster)) if az: query = query.filter_by(availability_zone=az) filters_dict = {} LOG.debug("Building query based on filter") for key, value in filters.items(): if key == 'metadata': col_attr = getattr(models.Snapshot, 'snapshot_metadata') for k, v in value.items(): query = query.filter(col_attr.any(key=k, value=v)) else: filters_dict[key] = value # Apply exact matches if filters_dict: query = query.filter_by(**filters_dict) return query @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_latest_for_volume(context, volume_id): result = model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).\ options(joinedload('snapshot_metadata')).\ order_by(desc(models.Snapshot.created_at)).\ first() if not result: raise exception.VolumeSnapshotNotFound(volume_id=volume_id) return result @require_context def snapshot_get_all_by_host(context, host, filters=None): if filters and not is_valid_model_filters(models.Snapshot, filters): return [] query = model_query(context, models.Snapshot, read_deleted='no', project_only=True) if filters: query = query.filter_by(**filters) # As a side effect of the introduction of pool-aware scheduler, # newly created volumes will have pool information appended to # 'host' field of a volume record. 
So a volume record in DB can # now be either form below: # Host # Host#Pool if host and isinstance(host, six.string_types): session = get_session() with session.begin(): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] query = query.join(models.Snapshot.volume).filter( or_(*conditions)).options(joinedload('snapshot_metadata')) return query.all() elif not host: return [] @require_context def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(cgsnapshot_id=cgsnapshot_id).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(group_snapshot_id=group_snapshot_id).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """"Retrieves all snapshots in a project. If no sorting parameters are specified then returned snapshots are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param project_id: project for all snapshots being retrieved :param filters: dictionary of filters; will do exact matching on values :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :returns: list of matching snapshots """ if filters and not is_valid_model_filters( models.Snapshot, filters, exclude_list=('host', 'cluster_name', 'availability_zone')): return [] authorize_project_context(context, project_id) # Add project_id to filters filters = filters.copy() if filters else {} filters['project_id'] = project_id session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot) # No snapshots would match, return empty list if not query: return [] query = query.options(joinedload('snapshot_metadata')) return query.all() @require_context def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, session=None, host=None): authorize_project_context(context, project_id) query = model_query(context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no", session=session) if volume_type_id or host: query = query.join('volume') if volume_type_id: query = query.filter( models.Volume.volume_type_id == volume_type_id) if host: query = query.filter(_filter_host(models.Volume.host, host)) result = query.filter(models.Snapshot.project_id == project_id).first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_context def snapshot_data_get_for_project(context, project_id, volume_type_id=None, host=None): return 
_snapshot_data_get_for_project(context, project_id, volume_type_id, host=host) @require_context def snapshot_get_all_active_by_window(context, begin, end=None, project_id=None): """Return snapshots that were active during window.""" query = model_query(context, models.Snapshot, read_deleted="yes") query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa models.Snapshot.deleted_at > begin)) query = query.options(joinedload(models.Snapshot.volume)) query = query.options(joinedload('snapshot_metadata')) if end: query = query.filter(models.Snapshot.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @handle_db_data_error @require_context def snapshot_update(context, snapshot_id, values): query = model_query(context, models.Snapshot, project_only=True) result = query.filter_by(id=snapshot_id).update(values) if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) @require_context def get_snapshot_summary(context, project_only, filters=None): """Retrieves all snapshots summary. 
@require_context
def _snapshot_metadata_get(context, snapshot_id, session=None):
    """Return a snapshot's metadata as a ``{key: value}`` dictionary.

    :param context: context to query under
    :param snapshot_id: snapshot whose metadata rows are fetched
    :param session: optional session to reuse
    :returns: dict mapping metadata key to value
    """
    query = _snapshot_metadata_get_query(context, snapshot_id, session)
    return {entry['key']: entry['value'] for entry in query.all()}
@require_context @require_snapshot_exists @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def snapshot_metadata_update(context, snapshot_id, metadata, delete): session = get_session() with session.begin(): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _snapshot_metadata_get(context, snapshot_id, session) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) meta_ref.update({'deleted': True, 'deleted_at': timeutils.utcnow()}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) except exception.SnapshotMetadataNotFound: meta_ref = models.SnapshotMetadata() item.update({"key": meta_key, "snapshot_id": snapshot_id}) meta_ref.update(item) meta_ref.save(session=session) return snapshot_metadata_get(context, snapshot_id) ################### @handle_db_data_error @require_admin_context def volume_type_create(context, values, projects=None): """Create a new volume type. 
In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ if not values.get('id'): values['id'] = str(uuid.uuid4()) projects = projects or [] orm_projects = [] session = get_session() with session.begin(): try: _volume_type_get_by_name(context, values['name'], session) raise exception.VolumeTypeExists(id=values['name']) except exception.VolumeTypeNotFoundByName: pass try: _volume_type_get(context, values['id'], session) raise exception.VolumeTypeExists(id=values['id']) except exception.VolumeTypeNotFound: pass try: values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeType() volume_type_ref.update(values) session.add(volume_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.VolumeTypeProjects() access_ref.update({"volume_type_id": volume_type_ref.id, "project_id": project}) access_ref.save(session=session) orm_projects.append(access_ref) volume_type_ref.projects = orm_projects return volume_type_ref @handle_db_data_error @require_admin_context def group_type_create(context, values, projects=None): """Create a new group type. 
In order to pass in group specs, the values dict should contain a 'group_specs' key/value pair: {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) projects = projects or [] session = get_session() with session.begin(): try: _group_type_get_by_name(context, values['name'], session) raise exception.GroupTypeExists(id=values['name']) except exception.GroupTypeNotFoundByName: pass try: _group_type_get(context, values['id'], session) raise exception.GroupTypeExists(id=values['id']) except exception.GroupTypeNotFound: pass try: values['group_specs'] = _metadata_refs(values.get('group_specs'), models.GroupTypeSpecs) group_type_ref = models.GroupType() group_type_ref.update(values) session.add(group_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.GroupTypeProjects() access_ref.update({"group_type_id": group_type_ref.id, "project_id": project}) access_ref.save(session=session) return group_type_ref def _volume_type_get_query(context, session=None, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query(context, models.VolumeType, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) for expected in expected_fields: query = query.options(joinedload(expected)) if not context.is_admin: the_filter = [models.VolumeType.is_public == true()] projects_attr = getattr(models.VolumeType, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query def _group_type_get_query(context, session=None, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query(context, models.GroupType, session=session, read_deleted=read_deleted).\ options(joinedload('group_specs')) if 'projects' in expected_fields: query = query.options(joinedload('projects')) if not context.is_admin: 
the_filter = [models.GroupType.is_public == true()] projects_attr = models.GroupType.projects the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query def _process_volume_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.VolumeType.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.VolumeType, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.VolumeType, filters): return if filters.get('extra_specs') is not None: the_filter = [] searchdict = filters.pop('extra_specs') extra_specs = getattr(models.VolumeType, 'extra_specs') for k, v in searchdict.items(): # NOTE(tommylikehu): We will use 'LIKE' operator for # 'availability_zones' extra spec as it always store the # AZ list info within the format: "az1, az2,...." 
if k == 'RESKEY:availability_zones': the_filter.extend([extra_specs.any( models.VolumeTypeExtraSpecs.value.like(u'%%%s%%' % v), key=k, deleted=False)]) else: the_filter.extend( [extra_specs.any(key=k, value=v, deleted=False)]) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) query = query.filter_by(**filters) return query def _process_group_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.GroupType.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.GroupType, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id, deleted=False) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.GroupType, filters): return if filters.get('group_specs') is not None: the_filter = [] searchdict = filters.pop('group_specs') group_specs = getattr(models.GroupType, 'group_specs') for k, v in searchdict.items(): the_filter.extend([group_specs.any(key=k, value=v, deleted=False)]) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) query = query.filter_by(**filters) return query @handle_db_data_error @require_admin_context def _type_update(context, type_id, values, is_group): if is_group: model = models.GroupType exists_exc = exception.GroupTypeExists else: model = models.VolumeType exists_exc = exception.VolumeTypeExists session = get_session() with session.begin(): # No description change if values['description'] is None: del values['description'] # No is_public change if values['is_public'] is None: del values['is_public'] # No name change if values['name'] is None: del 
values['name'] else: # Group type name is unique. If change to a name that belongs to # a different group_type, it should be prevented. conditions = and_(model.name == values['name'], model.id != type_id, ~model.deleted) query = session.query(sql.exists().where(conditions)) if query.scalar(): raise exists_exc(id=values['name']) query = model_query(context, model, project_only=True, session=session) result = query.filter_by(id=type_id).update(values) if not result: if is_group: raise exception.GroupTypeNotFound(group_type_id=type_id) else: raise exception.VolumeTypeNotFound(volume_type_id=type_id) def volume_type_update(context, volume_type_id, values): _type_update(context, volume_type_id, values, is_group=False) def group_type_update(context, group_type_id, values): _type_update(context, group_type_id, values, is_group=True) @require_context def volume_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Returns a dict describing all volume_types with name as key. If no sort parameters are specified then the returned volume types are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_type_filters function for more information :param list_result: For compatibility, if list_result = True, return a list instead of dict. 
:returns: list/dict of matching volume types """ session = get_session() with session.begin(): # Add context for _process_volume_types_filters filters = filters or {} filters['context'] = context # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.VolumeType) # No volume types would match, return empty dict or list if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [_dict_with_extra_specs_if_authorized(context, row) for row in rows] return result result = {row['name']: _dict_with_extra_specs_if_authorized(context, row) for row in rows} return result @require_context def group_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Returns a dict describing all group_types with name as key. If no sort parameters are specified then the returned group types are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_type_filters function for more information :param list_result: For compatibility, if list_result = True, return a list instead of dict. 
:returns: list/dict of matching group types """ session = get_session() with session.begin(): # Add context for _process_group_types_filters filters = filters or {} filters['context'] = context # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.GroupType) # No group types would match, return empty dict or list if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [_dict_with_group_specs_if_authorized(context, row) for row in rows] return result result = {row['name']: _dict_with_group_specs_if_authorized(context, row) for row in rows} return result def _volume_type_get_id_from_volume_type_query(context, id, session=None): return model_query( context, models.VolumeType.id, read_deleted="no", session=session, base_model=models.VolumeType).\ filter_by(id=id) def _group_type_get_id_from_group_type_query(context, id, session=None): return model_query( context, models.GroupType.id, read_deleted="no", session=session, base_model=models.GroupType).\ filter_by(id=id) def _volume_type_get_id_from_volume_type(context, id, session=None): result = _volume_type_get_id_from_volume_type_query( context, id, session=session).first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result[0] def _group_type_get_id_from_group_type(context, id, session=None): result = _group_type_get_id_from_group_type_query( context, id, session=session).first() if not result: raise exception.GroupTypeNotFound(group_type_id=id) return result[0] def _volume_type_get_db_object(context, id, session=None, inactive=False, expected_fields=None): read_deleted = "yes" if inactive else "no" result = _volume_type_get_query( context, session, read_deleted, expected_fields).\ filter_by(id=id).\ first() return result def _group_type_get_db_object(context, id, session=None, inactive=False, expected_fields=None): read_deleted = "yes" if inactive else "no" result = 
_group_type_get_query( context, session, read_deleted, expected_fields).\ filter_by(id=id).\ first() return result @require_context def _volume_type_get(context, id, session=None, inactive=False, expected_fields=None): expected_fields = expected_fields or [] result = _volume_type_get_db_object(context, id, session, inactive, expected_fields) if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) vtype = _dict_with_extra_specs_if_authorized(context, result) if 'projects' in expected_fields: vtype['projects'] = [p['project_id'] for p in result['projects']] if 'qos_specs' in expected_fields: vtype['qos_specs'] = result.qos_specs return vtype @require_context def _group_type_get(context, id, session=None, inactive=False, expected_fields=None): expected_fields = expected_fields or [] result = _group_type_get_db_object(context, id, session, inactive, expected_fields) if not result: raise exception.GroupTypeNotFound(group_type_id=id) gtype = _dict_with_group_specs_if_authorized(context, result) if 'projects' in expected_fields: gtype['projects'] = [p['project_id'] for p in result['projects']] return gtype @require_context def volume_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing specific volume_type.""" return _volume_type_get(context, id, session=None, inactive=inactive, expected_fields=expected_fields) @require_context def group_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing specific group_type.""" return _group_type_get(context, id, session=None, inactive=inactive, expected_fields=expected_fields) def _volume_type_get_full(context, id): """Return dict for a specific volume_type with extra_specs and projects.""" return _volume_type_get(context, id, session=None, inactive=False, expected_fields=('extra_specs', 'projects')) def _group_type_get_full(context, id): """Return dict for a specific group_type with group_specs and projects.""" return _group_type_get(context, id, 
session=None, inactive=False, expected_fields=('group_specs', 'projects')) @require_context def _volume_type_ref_get(context, id, session=None, inactive=False): read_deleted = "yes" if inactive else "no" result = model_query(context, models.VolumeType, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result @require_context def _group_type_ref_get(context, id, session=None, inactive=False): read_deleted = "yes" if inactive else "no" result = model_query(context, models.GroupType, session=session, read_deleted=read_deleted).\ options(joinedload('group_specs')).\ filter_by(id=id).\ first() if not result: raise exception.GroupTypeNotFound(group_type_id=id) return result @require_context def _volume_type_get_by_name(context, name, session=None): result = model_query(context, models.VolumeType, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) return _dict_with_extra_specs_if_authorized(context, result) @require_context def _group_type_get_by_name(context, name, session=None): result = model_query(context, models.GroupType, session=session).\ options(joinedload('group_specs')).\ filter_by(name=name).\ first() if not result: raise exception.GroupTypeNotFoundByName(group_type_name=name) return _dict_with_group_specs_if_authorized(context, result) @require_context def volume_type_get_by_name(context, name): """Return a dict describing specific volume_type.""" return _volume_type_get_by_name(context, name) @require_context def group_type_get_by_name(context, name): """Return a dict describing specific group_type.""" return _group_type_get_by_name(context, name) @require_context def volume_types_get_by_name_or_id(context, volume_type_list): """Return a dict describing specific volume_type.""" req_volume_types = [] for vol_t in 
volume_type_list: if not uuidutils.is_uuid_like(vol_t): vol_type = _volume_type_get_by_name(context, vol_t) else: try: vol_type = _volume_type_get(context, vol_t) except exception.VolumeTypeNotFound: # check again if we get this volume type by uuid-like name try: vol_type = _volume_type_get_by_name(context, vol_t) except exception.VolumeTypeNotFoundByName: raise exception.VolumeTypeNotFound(volume_type_id=vol_t) req_volume_types.append(vol_type) return req_volume_types @require_context def group_types_get_by_name_or_id(context, group_type_list): """Return a dict describing specific group_type.""" req_group_types = [] for grp_t in group_type_list: if not uuidutils.is_uuid_like(grp_t): grp_type = _group_type_get_by_name(context, grp_t) else: grp_type = _group_type_get(context, grp_t) req_group_types.append(grp_type) return req_group_types @require_admin_context def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): read_deleted = "yes" if inactive else "no" # Raise QoSSpecsNotFound if no specs found if not resource_exists(context, models.QualityOfServiceSpecs, qos_specs_id): raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) vts = (model_query(context, models.VolumeType, read_deleted=read_deleted). options(joinedload('extra_specs')). options(joinedload('projects')). filter_by(qos_specs_id=qos_specs_id).all()) return vts @require_admin_context def volume_type_qos_associate(context, type_id, qos_specs_id): session = get_session() with session.begin(): _volume_type_get(context, type_id, session) session.query(models.VolumeType). \ filter_by(id=type_id). \ update({'qos_specs_id': qos_specs_id, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_disassociate(context, qos_specs_id, type_id): """Disassociate volume type from qos specs.""" session = get_session() with session.begin(): _volume_type_get(context, type_id, session) session.query(models.VolumeType). \ filter_by(id=type_id). 
\ filter_by(qos_specs_id=qos_specs_id). \ update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_disassociate_all(context, qos_specs_id): """Disassociate all volume types associated with specified qos specs.""" session = get_session() with session.begin(): session.query(models.VolumeType). \ filter_by(qos_specs_id=qos_specs_id). \ update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_specs_get(context, type_id): """Return all qos specs for given volume type. result looks like: { 'qos_specs': { 'id': 'qos-specs-id', 'name': 'qos_specs_name', 'consumer': 'Consumer', 'specs': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3' } } } """ session = get_session() with session.begin(): _volume_type_get(context, type_id, session) row = session.query(models.VolumeType). \ options(joinedload('qos_specs')). \ filter_by(id=type_id). \ first() # row.qos_specs is a list of QualityOfServiceSpecs ref specs = _dict_with_qos_specs(row.qos_specs) if not specs: # turn empty list to None specs = None else: specs = specs[0] return {'qos_specs': specs} @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_type_destroy(context, id): utcnow = timeutils.utcnow() session = get_session() with session.begin(): _volume_type_get(context, id, session) results = model_query(context, models.Volume, session=session). 
\ filter_by(volume_type_id=id).all() group_count = model_query(context, models.GroupVolumeTypeMapping, read_deleted="no", session=session).\ filter_by(volume_type_id=id).count() cg_count = model_query(context, models.ConsistencyGroup, session=session).filter( models.ConsistencyGroup.volume_type_id.contains(id)).count() if results or group_count or cg_count: LOG.error('VolumeType %s deletion failed, VolumeType in use.', id) raise exception.VolumeTypeInUse(volume_type_id=id) updated_values = {'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')} model_query(context, models.VolumeType, session=session).\ filter_by(id=id).\ update(updated_values) model_query(context, models.VolumeTypeExtraSpecs, session=session).\ filter_by(volume_type_id=id).\ update({'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) model_query(context, models.Encryption, session=session).\ filter_by(volume_type_id=id).\ update({'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) model_query(context, models.VolumeTypeProjects, session=session, read_deleted="int_no").filter_by( volume_type_id=id).soft_delete(synchronize_session=False) del updated_values['updated_at'] return updated_values @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def group_type_destroy(context, id): session = get_session() with session.begin(): _group_type_get(context, id, session) results = model_query(context, models.Group, session=session). 
\ filter_by(group_type_id=id).all() if results: LOG.error('GroupType %s deletion failed, ' 'GroupType in use.', id) raise exception.GroupTypeInUse(group_type_id=id) model_query(context, models.GroupType, session=session).\ filter_by(id=id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) model_query(context, models.GroupTypeSpecs, session=session).\ filter_by(group_type_id=id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_get_all_active_by_window(context, begin, end=None, project_id=None): """Return volumes that were active during window.""" query = model_query(context, models.Volume, read_deleted="yes") query = query.filter(or_(models.Volume.deleted_at == None, # noqa models.Volume.deleted_at > begin)) if end: query = query.filter(models.Volume.created_at < end) if project_id: query = query.filter_by(project_id=project_id) query = (query.options(joinedload('volume_metadata')). options(joinedload('volume_type')). options(joinedload('volume_attachment')). options(joinedload('consistencygroup')). 
options(joinedload('group'))) if is_admin_context(context): query = query.options(joinedload('volume_admin_metadata')) return query.all() def _volume_type_access_query(context, session=None): return model_query(context, models.VolumeTypeProjects, session=session, read_deleted="int_no") def _group_type_access_query(context, session=None): return model_query(context, models.GroupTypeProjects, session=session, read_deleted="no") @require_admin_context def volume_type_access_get_all(context, type_id): volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) return _volume_type_access_query(context).\ filter_by(volume_type_id=volume_type_id).all() @require_admin_context def group_type_access_get_all(context, type_id): group_type_id = _group_type_get_id_from_group_type(context, type_id) return _group_type_access_query(context).\ filter_by(group_type_id=group_type_id).all() def _group_volume_type_mapping_query(context, session=None): return model_query(context, models.GroupVolumeTypeMapping, session=session, read_deleted="no") @require_admin_context def volume_type_get_all_by_group(context, group_id): # Generic volume group mappings = (_group_volume_type_mapping_query(context). filter_by(group_id=group_id).all()) session = get_session() with session.begin(): volume_type_ids = [mapping.volume_type_id for mapping in mappings] query = (model_query(context, models.VolumeType, session=session, read_deleted='no'). filter(models.VolumeType.id.in_(volume_type_ids)). options(joinedload('extra_specs')). options(joinedload('projects')). 
all()) return query def _group_volume_type_mapping_get_all_by_group_volume_type(context, group_id, volume_type_id): mappings = _group_volume_type_mapping_query(context).\ filter_by(group_id=group_id).\ filter_by(volume_type_id=volume_type_id).all() return mappings @require_admin_context def volume_type_access_add(context, type_id, project_id): """Add given tenant to the volume type access list.""" volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) access_ref = models.VolumeTypeProjects() access_ref.update({"volume_type_id": volume_type_id, "project_id": project_id}) session = get_session() with session.begin(): try: access_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.VolumeTypeAccessExists(volume_type_id=type_id, project_id=project_id) return access_ref @require_admin_context def group_type_access_add(context, type_id, project_id): """Add given tenant to the group type access list.""" group_type_id = _group_type_get_id_from_group_type(context, type_id) access_ref = models.GroupTypeProjects() access_ref.update({"group_type_id": group_type_id, "project_id": project_id}) session = get_session() with session.begin(): try: access_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.GroupTypeAccessExists(group_type_id=type_id, project_id=project_id) return access_ref @require_admin_context def volume_type_access_remove(context, type_id, project_id): """Remove given tenant from the volume type access list.""" volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) count = (_volume_type_access_query(context). filter_by(volume_type_id=volume_type_id). filter_by(project_id=project_id). 
soft_delete(synchronize_session=False)) if count == 0: raise exception.VolumeTypeAccessNotFound( volume_type_id=type_id, project_id=project_id) @require_admin_context def group_type_access_remove(context, type_id, project_id): """Remove given tenant from the group type access list.""" group_type_id = _group_type_get_id_from_group_type(context, type_id) count = (_group_type_access_query(context). filter_by(group_type_id=group_type_id). filter_by(project_id=project_id). soft_delete(synchronize_session=False)) if count == 0: raise exception.GroupTypeAccessNotFound( group_type_id=type_id, project_id=project_id) #################### def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): session = get_session() with session.begin(): _volume_type_extra_specs_get_item(context, volume_type_id, key, session) _volume_type_extra_specs_query(context, volume_type_id, session).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): result = _volume_type_extra_specs_query( context, volume_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id) return result @handle_db_data_error @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value 
in specs.items(): try: spec_ref = _volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": False}) spec_ref.save(session=session) return specs #################### def _group_type_specs_query(context, group_type_id, session=None): return model_query(context, models.GroupTypeSpecs, session=session, read_deleted="no").\ filter_by(group_type_id=group_type_id) @require_context def group_type_specs_get(context, group_type_id): rows = _group_type_specs_query(context, group_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def group_type_specs_delete(context, group_type_id, key): session = get_session() with session.begin(): _group_type_specs_get_item(context, group_type_id, key, session) _group_type_specs_query(context, group_type_id, session).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _group_type_specs_get_item(context, group_type_id, key, session=None): result = _group_type_specs_query( context, group_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.GroupTypeSpecsNotFound( group_specs_key=key, group_type_id=group_type_id) return result @handle_db_data_error @require_context def group_type_specs_update_or_create(context, group_type_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value in specs.items(): try: spec_ref = _group_type_specs_get_item( context, group_type_id, key, session) except exception.GroupTypeSpecsNotFound: spec_ref = models.GroupTypeSpecs() spec_ref.update({"key": key, "value": value, "group_type_id": group_type_id, "deleted": False}) spec_ref.save(session=session) return specs #################### @require_admin_context def 
@require_admin_context
def qos_specs_create(context, values):
    """Create a new QoS specs.

    :param values dictionary that contains specifications for QoS

    Expected format of the input parameter:

    .. code-block:: json

        {
            'name': 'Name',
            'consumer': 'front-end',
            'specs':
            {
                'total_iops_sec': 1000,
                'total_bytes_sec': 1024000
            }
        }

    """
    specs_id = str(uuid.uuid4())
    session = get_session()
    with session.begin():
        try:
            # A hit here means the name is already taken.
            _qos_specs_get_all_by_name(context, values['name'], session)
            raise exception.QoSSpecsExists(specs_id=values['name'])
        except exception.QoSSpecsNotFound:
            pass
        try:
            # Insert a root entry for QoS specs
            specs_root = models.QualityOfServiceSpecs()
            root = dict(id=specs_id)
            # 'QoS_Specs_Name' is an internal reserved key to store
            # the name of QoS specs
            root['key'] = 'QoS_Specs_Name'
            root['value'] = values['name']
            LOG.debug("DB qos_specs_create(): root %s", root)
            specs_root.update(root)
            specs_root.save(session=session)

            # Save 'consumer' value directly as it will not be in
            # values['specs'] and so we avoid modifying/copying passed in dict
            consumer = {'key': 'consumer',
                        'value': values['consumer'],
                        'specs_id': specs_id,
                        'id': six.text_type(uuid.uuid4())}
            cons_entry = models.QualityOfServiceSpecs()
            cons_entry.update(consumer)
            cons_entry.save(session=session)

            # Insert all specification entries for QoS specs
            for k, v in values.get('specs', {}).items():
                item = dict(key=k, value=v, specs_id=specs_id)
                item['id'] = str(uuid.uuid4())
                spec_entry = models.QualityOfServiceSpecs()
                spec_entry.update(item)
                spec_entry.save(session=session)
        except db_exc.DBDataError:
            msg = _('Error writing field to database')
            LOG.exception(msg)
            raise exception.Invalid(msg)
        except Exception as e:
            raise db_exc.DBError(e)

        return dict(id=specs_root.id,
                    name=specs_root.value)


@require_admin_context
def _qos_specs_get_all_by_name(context, name, session=None, inactive=False):
    """Return the root rows (key == 'QoS_Specs_Name') matching ``name``."""
    read_deleted = 'yes' if inactive else 'no'
    results = model_query(context, models.QualityOfServiceSpecs,
                          read_deleted=read_deleted, session=session). \
        filter_by(key='QoS_Specs_Name'). \
        filter_by(value=name). \
        options(joinedload('specs')).all()

    if not results:
        raise exception.QoSSpecsNotFound(specs_id=name)

    return results


@require_admin_context
def _qos_specs_get_all_ref(context, qos_specs_id, session=None,
                           inactive=False):
    """Return the root row of a qos specs by id, with children eager-loaded."""
    read_deleted = 'yes' if inactive else 'no'
    result = model_query(context, models.QualityOfServiceSpecs,
                         read_deleted=read_deleted, session=session). \
        filter_by(id=qos_specs_id). \
        options(joinedload_all('specs')).all()

    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)

    return result


def _dict_with_children_specs(specs):
    """Convert specs list to a dict."""
    result = {}
    update_time = None
    for spec in specs:
        # Skip deleted keys
        if not spec['deleted']:
            # Add update time to specs list, in order to get the keyword
            # 'updated_at' in specs info when printing logs.
            if not update_time and spec['updated_at']:
                update_time = spec['updated_at']
            elif update_time and spec['updated_at']:
                # Keep the most recent 'updated_at' of all children.
                if (update_time - spec['updated_at']).total_seconds() < 0:
                    update_time = spec['updated_at']
            result.update({spec['key']: spec['value']})
    if update_time:
        result.update({'updated_at': update_time})

    return result


def _dict_with_qos_specs(rows):
    """Convert qos specs query results to list.

    Qos specs query results are a list of quality_of_service_specs refs,
    some are root entry of a qos specs (key == 'QoS_Specs_Name') and the
    rest are children entry, a.k.a detailed specs for a qos specs. This
    function converts query results to a dict using spec name as key.
    """
    result = []
    for row in rows:
        if row['key'] == 'QoS_Specs_Name':
            # Add create time for member, in order to get the keyword
            # 'created_at' in the specs info when printing logs.
            member = {'name': row['value'], 'id': row['id'],
                      'created_at': row['created_at']}
            if row.specs:
                spec_dict = _dict_with_children_specs(row.specs)
                # 'consumer' is stored as a child spec but surfaced as a
                # top-level attribute of the qos specs.
                member['consumer'] = spec_dict.pop('consumer')
                if spec_dict.get('updated_at'):
                    member['updated_at'] = spec_dict.pop('updated_at')
                member.update(dict(specs=spec_dict))
            result.append(member)

    return result


@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
    """Return one qos specs (root + children) as a dict."""
    rows = _qos_specs_get_all_ref(context, qos_specs_id, None, inactive)

    return _dict_with_qos_specs(rows)[0]


@require_admin_context
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
                      offset=None, sort_keys=None, sort_dirs=None):
    """Returns a list of all qos_specs.

    Results is like:
        [{
            'id': SPECS-UUID,
            'name': 'qos_spec-1',
            'consumer': 'back-end',
            'specs': {
                'key1': 'value1',
                'key2': 'value2',
                ...
            }
         },
         {
            'id': SPECS-UUID,
            'name': 'qos_spec-2',
            'consumer': 'front-end',
            'specs': {
                'key1': 'value1',
                'key2': 'value2',
                ...
            }
         },
        ]
    """
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset, models.QualityOfServiceSpecs)
        # No Qos specs would match, return empty list
        if query is None:
            return []
        rows = query.all()
        return _dict_with_qos_specs(rows)


@require_admin_context
def _qos_specs_get_query(context, session):
    """Base query over qos specs root rows with children eager-loaded."""
    rows = model_query(context, models.QualityOfServiceSpecs,
                       session=session,
                       read_deleted='no').\
        options(joinedload_all('specs')).filter_by(key='QoS_Specs_Name')
    return rows


def _process_qos_specs_filters(query, filters):
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.QualityOfServiceSpecs, filters):
            return
        query = query.filter_by(**filters)
    return query


@require_admin_context
def _qos_specs_get(context, qos_spec_id, session=None):
    """Return the root row of a qos specs by id (no children loaded)."""
    result = model_query(context, models.QualityOfServiceSpecs,
                         session=session,
                         read_deleted='no').\
        filter_by(id=qos_spec_id).filter_by(key='QoS_Specs_Name').first()

    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_spec_id)

    return result


@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
    """Return one qos specs looked up by name, as a dict."""
    rows = _qos_specs_get_all_by_name(context, name, None, inactive)

    return _dict_with_qos_specs(rows)[0]


@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
    """Return all entities associated with specified qos specs.

    For now, the only entity that is possible to associate with
    a qos specs is volume type, so this is just a wrapper of
    volume_type_qos_associations_get(). But it's possible to
    extend qos specs association to other entities, such as volumes,
    sometime in future.
    """
    return volume_type_qos_associations_get(context, qos_specs_id)


@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
    """Associate volume type from specified qos specs."""
    return volume_type_qos_associate(context, type_id, qos_specs_id)


@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
    """Disassociate volume type from specified qos specs."""
    return volume_type_qos_disassociate(context, qos_specs_id, type_id)


@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
    """Disassociate all entities associated with specified qos specs.

    For now, the only entity that is possible to associate with
    a qos specs is volume type, so this is just a wrapper of
    volume_type_qos_disassociate_all(). But it's possible to extend
    qos specs association to other entities, such as volumes, sometime
    in future.
    """
    return volume_type_qos_disassociate_all(context, qos_specs_id)


@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
    """Soft-delete one child spec (by key) of a qos specs."""
    session = get_session()
    with session.begin():
        session.query(models.QualityOfServiceSpecs). \
            filter(models.QualityOfServiceSpecs.key == key). \
            filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_admin_context
def qos_specs_delete(context, qos_specs_id):
    """Soft-delete a qos specs root entry and all of its children."""
    session = get_session()
    with session.begin():
        # Existence check; raises QoSSpecsNotFound on a miss.
        _qos_specs_get_all_ref(context, qos_specs_id, session)
        updated_values = {'deleted': True,
                          'deleted_at': timeutils.utcnow(),
                          'updated_at': literal_column('updated_at')}
        # One UPDATE covers both the root row (id match) and the
        # children rows (specs_id match).
        session.query(models.QualityOfServiceSpecs).\
            filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
                       models.QualityOfServiceSpecs.specs_id ==
                       qos_specs_id)).\
            update(updated_values)
    del updated_values['updated_at']
    return updated_values


@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
    """Return one child spec row for (qos_specs_id, key) or raise."""
    result = model_query(context, models.QualityOfServiceSpecs,
                         session=session). \
        filter(models.QualityOfServiceSpecs.key == key). \
        filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
        first()

    if not result:
        raise exception.QoSSpecsKeyNotFound(
            specs_key=key,
            specs_id=qos_specs_id)

    return result
""" session = get_session() with session.begin(): # make sure qos specs exists exists = resource_exists(context, models.QualityOfServiceSpecs, qos_specs_id, session) if not exists: raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) specs = updates.get('specs', {}) if 'consumer' in updates: # Massage consumer to the right place for DB and copy specs # before updating so we don't modify dict for caller specs = specs.copy() specs['consumer'] = updates['consumer'] spec_ref = None for key in specs.keys(): try: spec_ref = _qos_specs_get_item( context, qos_specs_id, key, session) except exception.QoSSpecsKeyNotFound: spec_ref = models.QualityOfServiceSpecs() id = None if spec_ref.get('id', None): id = spec_ref['id'] else: id = str(uuid.uuid4()) value = dict(id=id, key=key, value=specs[key], specs_id=qos_specs_id, deleted=False) LOG.debug('qos_specs_update() value: %s', value) spec_ref.update(value) spec_ref.save(session=session) return specs #################### @require_context def volume_type_encryption_get(context, volume_type_id, session=None): return model_query(context, models.Encryption, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id).first() @require_admin_context def volume_type_encryption_delete(context, volume_type_id): session = get_session() with session.begin(): encryption = volume_type_encryption_get(context, volume_type_id, session) if not encryption: raise exception.VolumeTypeEncryptionNotFound( type_id=volume_type_id) encryption.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @handle_db_data_error @require_admin_context def volume_type_encryption_create(context, volume_type_id, values): session = get_session() with session.begin(): encryption = models.Encryption() if 'volume_type_id' not in values: values['volume_type_id'] = volume_type_id if 'encryption_id' not in values: values['encryption_id'] = six.text_type(uuid.uuid4()) encryption.update(values) 
session.add(encryption) return encryption @handle_db_data_error @require_admin_context def volume_type_encryption_update(context, volume_type_id, values): query = model_query(context, models.Encryption) result = query.filter_by(volume_type_id=volume_type_id).update(values) if not result: raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id) def volume_type_encryption_volume_get(context, volume_type_id, session=None): volume_list = _volume_get_query(context, session=session, project_only=False).\ filter_by(volume_type_id=volume_type_id).\ all() return volume_list #################### @require_context def volume_encryption_metadata_get(context, volume_id, session=None): """Return the encryption metadata for a given volume.""" volume_ref = _volume_get(context, volume_id) encryption_ref = volume_type_encryption_get(context, volume_ref['volume_type_id']) values = { 'encryption_key_id': volume_ref['encryption_key_id'], } if encryption_ref: for key in ['control_location', 'cipher', 'key_size', 'provider']: values[key] = encryption_ref[key] return values #################### @require_context def _volume_glance_metadata_get_all(context, session=None): query = model_query(context, models.VolumeGlanceMetadata, session=session) if is_user_context(context): query = query.filter( models.Volume.id == models.VolumeGlanceMetadata.volume_id, models.Volume.project_id == context.project_id) return query.all() @require_context def volume_glance_metadata_get_all(context): """Return the Glance metadata for all volumes.""" return _volume_glance_metadata_get_all(context) @require_context def volume_glance_metadata_list_get(context, volume_id_list): """Return the glance metadata for a volume list.""" query = model_query(context, models.VolumeGlanceMetadata, session=None) query = query.filter( models.VolumeGlanceMetadata.volume_id.in_(volume_id_list)) return query.all() @require_context @require_volume_exists def _volume_glance_metadata_get(context, volume_id, session=None): 
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id, session=None):
    """Return the glance metadata rows of one volume or raise."""
    rows = model_query(context, models.VolumeGlanceMetadata,
                       session=session).\
        filter_by(volume_id=volume_id).\
        filter_by(deleted=False).\
        all()

    if not rows:
        raise exception.GlanceMetadataNotFound(id=volume_id)

    return rows


@require_context
def volume_glance_metadata_get(context, volume_id):
    """Return the Glance metadata for the specified volume."""

    return _volume_glance_metadata_get(context, volume_id)


@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id,
                                         session=None):
    """Return the glance metadata rows of one snapshot or raise."""
    rows = model_query(context, models.VolumeGlanceMetadata,
                       session=session).\
        filter_by(snapshot_id=snapshot_id).\
        filter_by(deleted=False).\
        all()

    if not rows:
        raise exception.GlanceMetadataNotFound(id=snapshot_id)

    return rows


@require_context
def volume_snapshot_glance_metadata_get(context, snapshot_id):
    """Return the Glance metadata for the specified snapshot."""

    return _volume_snapshot_glance_metadata_get(context, snapshot_id)


@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value):
    """Update the Glance metadata for a volume by adding a new key:value pair.

    This API does not support changing the value of a key once it has been
    created.
    """
    session = get_session()
    with session.begin():
        rows = session.query(models.VolumeGlanceMetadata).\
            filter_by(volume_id=volume_id).\
            filter_by(key=key).\
            filter_by(deleted=False).all()

        if len(rows) > 0:
            raise exception.GlanceMetadataExists(key=key,
                                                 volume_id=volume_id)

        vol_glance_metadata = models.VolumeGlanceMetadata()
        vol_glance_metadata.volume_id = volume_id
        vol_glance_metadata.key = key
        # Values are always persisted as text.
        vol_glance_metadata.value = six.text_type(value)
        session.add(vol_glance_metadata)

    return


@require_context
@require_volume_exists
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
    """Update the Glance metadata for a volume by adding new key:value pairs.

    This API does not support changing the value of a key once it has been
    created.
    """
    session = get_session()
    with session.begin():
        for (key, value) in metadata.items():
            rows = session.query(models.VolumeGlanceMetadata).\
                filter_by(volume_id=volume_id).\
                filter_by(key=key).\
                filter_by(deleted=False).all()

            if len(rows) > 0:
                raise exception.GlanceMetadataExists(key=key,
                                                     volume_id=volume_id)

            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.volume_id = volume_id
            vol_glance_metadata.key = key
            vol_glance_metadata.value = six.text_type(value)
            session.add(vol_glance_metadata)


@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
    """Update the Glance metadata for a snapshot.

    This copies all of the key:value pairs from the originating volume, to
    ensure that a volume created from the snapshot will retain the
    original metadata.
    """
    session = get_session()
    with session.begin():
        metadata = _volume_glance_metadata_get(context, volume_id,
                                               session=session)
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.snapshot_id = snapshot_id
            vol_glance_metadata.key = meta['key']
            vol_glance_metadata.value = meta['value']

            vol_glance_metadata.save(session=session)


@require_context
def volume_glance_metadata_copy_from_volume_to_volume(context,
                                                      src_volume_id,
                                                      volume_id):
    """Update the Glance metadata for a volume.

    This copies all all of the key:value pairs from the originating volume,
    to ensure that a volume created from the volume (clone) will
    retain the original metadata.
    """
    session = get_session()
    with session.begin():
        metadata = _volume_glance_metadata_get(context,
                                               src_volume_id,
                                               session=session)
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.volume_id = volume_id
            vol_glance_metadata.key = meta['key']
            vol_glance_metadata.value = meta['value']

            vol_glance_metadata.save(session=session)


@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
    """Update Glance metadata from a volume.

    Update the Glance metadata from a volume (created from a snapshot) by
    copying all of the key:value pairs from the originating snapshot.

    This is so that the Glance metadata from the original volume is retained.
    """
    session = get_session()
    with session.begin():
        metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id,
                                                        session=session)
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
            vol_glance_metadata.volume_id = volume_id
            vol_glance_metadata.key = meta['key']
            vol_glance_metadata.value = meta['value']

            vol_glance_metadata.save(session=session)


@require_context
def volume_glance_metadata_delete_by_volume(context, volume_id):
    """Soft-delete all glance metadata of a volume."""
    model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
        filter_by(volume_id=volume_id).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
    """Soft-delete all glance metadata of a snapshot."""
    model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
        filter_by(snapshot_id=snapshot_id).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})


###############################


@require_context
def backup_get(context, backup_id, read_deleted=None, project_only=True):
    """Return a backup by id; raises BackupNotFound if missing."""
    return _backup_get(context, backup_id,
                       read_deleted=read_deleted,
                       project_only=project_only)
def _backup_get(context, backup_id, session=None, read_deleted=None,
                project_only=True):
    result = model_query(
        context, models.Backup, session=session,
        project_only=project_only,
        read_deleted=read_deleted).options(
            joinedload('backup_metadata')).filter_by(id=backup_id).first()

    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)

    return result


def _backup_get_all(context, filters=None, marker=None, limit=None,
                    offset=None, sort_keys=None, sort_dirs=None):
    """Shared pagination-aware query used by the backup_get_all* APIs."""
    if filters and not is_valid_model_filters(models.Backup, filters):
        return []

    session = get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, models.Backup)
        if query is None:
            return []
        return query.all()


def _backups_get_query(context, session=None, project_only=False):
    return model_query(
        context, models.Backup, session=session,
        project_only=project_only).options(joinedload('backup_metadata'))


@apply_like_filters(model=models.Backup)
def _process_backups_filters(query, filters):
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Backup, filters):
            return
        filters_dict = {}
        for key, value in filters.items():
            if key == 'metadata':
                # Metadata filters match against the related
                # backup_metadata rows rather than Backup columns.
                col_attr = getattr(models.Backup, 'backup_metadata')
                for k, v in value.items():
                    query = query.filter(col_attr.any(key=k, value=v))
            else:
                filters_dict[key] = value

        # Apply exact matches
        if filters_dict:
            query = query.filter_by(**filters_dict)
    return query


@require_admin_context
def backup_get_all(context, filters=None, marker=None, limit=None,
                   offset=None, sort_keys=None, sort_dirs=None):
    """Return all backups, optionally filtered/sorted/paginated."""
    return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
                           sort_dirs)


@require_admin_context
def backup_get_all_by_host(context, host):
    """Return all backups served by the given host."""
    return model_query(
        context, models.Backup).options(
            joinedload('backup_metadata')).filter_by(host=host).all()


@require_context
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
                              limit=None, offset=None, sort_keys=None,
                              sort_dirs=None):
    """Return all backups belonging to a project."""

    authorize_project_context(context, project_id)
    if not filters:
        filters = {}
    else:
        # Copy so we don't mutate the caller's dict.
        filters = filters.copy()

    filters['project_id'] = project_id

    return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
                           sort_dirs)


@require_context
def backup_get_all_by_volume(context, volume_id, filters=None):
    """Return all backups of a single volume."""

    authorize_project_context(context, volume_id)
    if not filters:
        filters = {}
    else:
        filters = filters.copy()

    filters['volume_id'] = volume_id

    return _backup_get_all(context, filters)


@require_context
def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Return backups that were active during window."""

    query = model_query(context, models.Backup, read_deleted="yes").options(
        joinedload('backup_metadata'))
    query = query.filter(or_(models.Backup.deleted_at == None,  # noqa
                             models.Backup.deleted_at > begin))
    if end:
        query = query.filter(models.Backup.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    return query.all()


@handle_db_data_error
@require_context
def backup_create(context, values):
    """Create a backup row (plus metadata rows) and return it."""
    values['backup_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.BackupMetadata)
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    session = get_session()
    with session.begin():
        backup_ref = models.Backup()
        backup_ref.update(values)
        session.add(backup_ref)

        return _backup_get(context, values['id'], session=session)


@handle_db_data_error
@require_context
def backup_update(context, backup_id, values):
    """Update a backup row in place; raises BackupNotFound on a miss."""
    if 'fail_reason' in values:
        values = values.copy()
        # fail_reason column is limited to 255 characters.
        values['fail_reason'] = (values['fail_reason'] or '')[:255]
    query = model_query(context, models.Backup, read_deleted="yes")
    result = query.filter_by(id=backup_id).update(values)
    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)


@require_admin_context
def backup_destroy(context, backup_id):
    """Soft-delete a backup and its metadata; return the updated values."""
    utcnow = timeutils.utcnow()
    updated_values = {'status': fields.BackupStatus.DELETED,
                      'deleted': True,
                      'deleted_at': utcnow,
                      'updated_at': literal_column('updated_at')}
    session = get_session()
    with session.begin():
        model_query(context, models.Backup, session=session).\
            filter_by(id=backup_id).\
            update(updated_values)
        model_query(context, models.BackupMetadata, session=session).\
            filter_by(backup_id=backup_id).\
            update({'deleted': True,
                    'deleted_at': utcnow,
                    'updated_at': literal_column('updated_at')})
    del updated_values['updated_at']
    return updated_values


@require_context
@require_backup_exists
def backup_metadata_get(context, backup_id):
    """Return backup metadata as a {key: value} dict."""
    return _backup_metadata_get(context, backup_id)


@require_context
def _backup_metadata_get(context, backup_id, session=None):
    rows = _backup_metadata_get_query(context, backup_id, session).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


def _backup_metadata_get_query(context, backup_id, session=None):
    return model_query(
        context, models.BackupMetadata,
        session=session, read_deleted="no").filter_by(backup_id=backup_id)


@require_context
def _backup_metadata_get_item(context, backup_id, key, session=None):
    """Return one metadata row for (backup_id, key) or raise."""
    result = _backup_metadata_get_query(
        context, backup_id, session=session).filter_by(key=key).first()

    if not result:
        raise exception.BackupMetadataNotFound(metadata_key=key,
                                               backup_id=backup_id)
    return result


@require_context
@require_backup_exists
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def backup_metadata_update(context, backup_id, metadata, delete):
    """Create/update backup metadata; optionally delete keys not supplied."""
    session = get_session()
    with session.begin():
        # Set existing metadata to deleted if delete argument is True
        if delete:
            original_metadata = _backup_metadata_get(context, backup_id,
                                                     session)
            for meta_key, meta_value in original_metadata.items():
                if meta_key not in metadata:
                    meta_ref = _backup_metadata_get_item(context,
                                                         backup_id,
                                                         meta_key, session)
                    meta_ref.update({'deleted': True,
                                     'deleted_at': timeutils.utcnow()})
                    meta_ref.save(session=session)

        meta_ref = None

        # Now update all existing items with new values, or create new meta
        # objects
        for meta_key, meta_value in metadata.items():

            # update the value whether it exists or not
            item = {"value": meta_value}

            try:
                meta_ref = _backup_metadata_get_item(context, backup_id,
                                                     meta_key, session)
            except exception.BackupMetadataNotFound:
                meta_ref = models.BackupMetadata()
                item.update({"key": meta_key, "backup_id": backup_id})

            meta_ref.update(item)
            meta_ref.save(session=session)

    return backup_metadata_get(context, backup_id)


###############################


@require_context
def _transfer_get(context, transfer_id, session=None):
    """Return a transfer by id, project-scoped for non-admin contexts."""
    query = model_query(context, models.Transfer,
                        session=session).\
        filter_by(id=transfer_id)

    if not is_admin_context(context):
        volume = models.Volume
        query = query.filter(models.Transfer.volume_id == volume.id,
                             volume.project_id == context.project_id)

    result = query.first()

    if not result:
        raise exception.TransferNotFound(transfer_id=transfer_id)

    return result


@require_context
def transfer_get(context, transfer_id):
    return _transfer_get(context, transfer_id)


def _process_transfer_filters(query, filters):
    if filters:
        project_id = filters.pop('project_id', None)
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Transfer, filters):
            return

        if project_id:
            # Transfers have no project_id column; scope via the volume.
            volume = models.Volume
            query = query.filter(volume.id ==
                                 models.Transfer.volume_id,
                                 volume.project_id == project_id)

        query = query.filter_by(**filters)
    return query


def _translate_transfers(transfers):
    """Project transfer rows onto plain dicts with a fixed field set."""
    fields = ('id', 'volume_id', 'display_name', 'created_at', 'deleted',
              'no_snapshots', 'source_project_id', 'destination_project_id',
              'accepted')
    return [{k: transfer[k] for k in fields} for transfer in transfers]


def _transfer_get_all(context, marker=None, limit=None, sort_keys=None,
                      sort_dirs=None, filters=None, offset=None):
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset, models.Transfer)
        if query is None:
            return []
        return _translate_transfers(query.all())
@require_admin_context
def transfer_get_all(context, marker=None, limit=None, sort_keys=None,
                     sort_dirs=None, filters=None, offset=None):
    """Return all transfers, optionally filtered/sorted/paginated."""
    return _transfer_get_all(context, marker=marker, limit=limit,
                             sort_keys=sort_keys, sort_dirs=sort_dirs,
                             filters=filters, offset=offset)


def _transfer_get_query(context, session=None, project_only=False):
    return model_query(context, models.Transfer, session=session,
                       project_only=project_only)


@require_context
def transfer_get_all_by_project(context, project_id, marker=None,
                                limit=None, sort_keys=None,
                                sort_dirs=None, filters=None, offset=None):
    """Return all transfers whose source volume belongs to ``project_id``."""
    authorize_project_context(context, project_id)
    # Copy so we don't mutate the caller's dict.
    filters = filters.copy() if filters else {}
    filters['project_id'] = project_id
    return _transfer_get_all(context, marker=marker, limit=limit,
                             sort_keys=sort_keys, sort_dirs=sort_dirs,
                             filters=filters, offset=offset)


@require_context
@handle_db_data_error
def transfer_create(context, values):
    """Create a transfer and move its volume to 'awaiting-transfer'."""
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    transfer_id = values['id']
    volume_id = values['volume_id']
    session = get_session()
    with session.begin():
        # Atomically flip the volume out of 'available' so it cannot be
        # used (or transferred twice) while this transfer is pending.
        expected = {'id': volume_id,
                    'status': 'available'}
        update = {'status': 'awaiting-transfer'}
        if not conditional_update(context, models.Volume, update, expected):
            msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s '
                     'expected in available state.')
                   % {'transfer_id': transfer_id, 'volume_id': volume_id})
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        transfer = models.Transfer()
        transfer.update(values)
        session.add(transfer)
        return transfer


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def transfer_destroy(context, transfer_id):
    """Soft-delete a transfer and return its volume to 'available'."""
    utcnow = timeutils.utcnow()
    session = get_session()
    with session.begin():
        volume_id = _transfer_get(context, transfer_id, session)['volume_id']
        expected = {'id': volume_id,
                    'status': 'awaiting-transfer'}
        update = {'status': 'available'}
        if not conditional_update(context, models.Volume, update, expected):
            # If the volume state is not 'awaiting-transfer' don't change it,
            # but we can still mark the transfer record as deleted.
            msg = (_('Transfer %(transfer_id)s: Volume expected in '
                     'awaiting-transfer state.')
                   % {'transfer_id': transfer_id})
            LOG.error(msg)

        updated_values = {'deleted': True,
                          'deleted_at': utcnow,
                          'updated_at': literal_column('updated_at')}
        (model_query(context, models.Transfer, session=session)
         .filter_by(id=transfer_id)
         .update(updated_values))
        del updated_values['updated_at']
        return updated_values


def _roll_back_transferred_volume_and_snapshots(context, volume_id,
                                                old_user_id, old_project_id,
                                                transffered_snapshots):
    """Undo a partially-completed transfer_accept.

    Restores the volume to 'awaiting-transfer' and re-assigns the already
    transferred snapshots back to the original user/project.
    """
    # Roll back the volume first; stop if it is no longer 'available'.
    expected = {'id': volume_id, 'status': 'available'}
    update = {'status': 'awaiting-transfer',
              'user_id': old_user_id,
              'project_id': old_project_id,
              'updated_at': timeutils.utcnow()}
    if not conditional_update(context, models.Volume, update, expected):
        LOG.warning('Volume: %(volume_id)s is not in the expected available '
                    'status. Rolling it back.', {'volume_id': volume_id})
        return

    for snapshot_id in transffered_snapshots:
        LOG.info('Beginning to roll back transferred snapshots: %s',
                 snapshot_id)
        expected = {'id': snapshot_id, 'status': 'available'}
        update = {'user_id': old_user_id,
                  'project_id': old_project_id,
                  'updated_at': timeutils.utcnow()}
        if not conditional_update(context, models.Snapshot, update, expected):
            LOG.warning('Snapshot: %(snapshot_id)s is not in the expected '
                        'available state. Rolling it back.',
                        {'snapshot_id': snapshot_id})
            return


@require_context
def transfer_accept(context, transfer_id, user_id, project_id,
                    no_snapshots=False):
    """Accept a transfer: re-own the volume (and optionally its snapshots).

    On any snapshot failure the already-transferred resources are rolled
    back before raising.
    """
    session = get_session()
    with session.begin():
        volume_id = _transfer_get(context, transfer_id, session)['volume_id']
        expected = {'id': volume_id,
                    'status': 'awaiting-transfer'}
        update = {'status': 'available',
                  'user_id': user_id,
                  'project_id': project_id,
                  'updated_at': timeutils.utcnow()}
        if not conditional_update(context, models.Volume, update, expected):
            msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s '
                     'expected in awaiting-transfer state.')
                   % {'transfer_id': transfer_id, 'volume_id': volume_id})
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        # Update snapshots for transfer snapshots with volume.
        if not no_snapshots:
            snapshots = snapshot_get_all_for_volume(context, volume_id)
            transferred_snapshots = []
            for snapshot in snapshots:
                LOG.info('Begin to transfer snapshot: %s', snapshot['id'])
                old_user_id = snapshot['user_id']
                old_project_id = snapshot['project_id']
                expected = {'id': snapshot['id'],
                            'status': 'available'}
                update = {'user_id': user_id,
                          'project_id': project_id,
                          'updated_at': timeutils.utcnow()}
                if not conditional_update(context, models.Snapshot, update,
                                          expected):
                    msg = (_('Transfer %(transfer_id)s: Snapshot '
                             '%(snapshot_id)s is not in the expected '
                             'available state.')
                           % {'transfer_id': transfer_id,
                              'snapshot_id': snapshot['id']})
                    LOG.error(msg)
                    _roll_back_transferred_volume_and_snapshots(
                        context, volume_id, old_user_id, old_project_id,
                        transferred_snapshots)
                    raise exception.InvalidSnapshot(reason=msg)
                transferred_snapshots.append(snapshot['id'])

        (session.query(models.Transfer)
         .filter_by(id=transfer_id)
         .update({'deleted': True,
                  'deleted_at': timeutils.utcnow(),
                  'updated_at': literal_column('updated_at'),
                  'destination_project_id': project_id,
                  'accepted': True}))


###############################
project_id, session=None): query = model_query(context, func.count(models.ConsistencyGroup.id), read_deleted="no", session=session).\ filter_by(project_id=project_id) result = query.first() return (0, result[0] or 0) @require_context def _consistencygroup_get(context, consistencygroup_id, session=None): result = model_query(context, models.ConsistencyGroup, session=session, project_only=True).\ filter_by(id=consistencygroup_id).\ first() if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id) return result @require_context def consistencygroup_get(context, consistencygroup_id): return _consistencygroup_get(context, consistencygroup_id) def _consistencygroups_get_query(context, session=None, project_only=False): return model_query(context, models.ConsistencyGroup, session=session, project_only=project_only) def _process_consistencygroups_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.ConsistencyGroup, filters): return query = query.filter_by(**filters) return query def _consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.ConsistencyGroup, filters): return [] session = get_session() with session.begin(): # Generate the paginate query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.ConsistencyGroup) if query is None: return [] return query.all() @require_admin_context def consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Retrieves all consistency groups. If no sort parameters are specified then the returned cgs are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: Filters for the query in the form of key/value. :returns: list of matching consistency groups """ return _consistencygroup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def consistencygroup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Retrieves all consistency groups in a project. If no sort parameters are specified then the returned cgs are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: Filters for the query in the form of key/value. 
:returns: list of matching consistency groups """ authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _consistencygroup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @handle_db_data_error @require_context def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None): cg_model = models.ConsistencyGroup values = values.copy() if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): if cg_snap_id: conditions = [cg_model.id == models.CGSnapshot.consistencygroup_id, models.CGSnapshot.id == cg_snap_id] elif cg_id: conditions = [cg_model.id == cg_id] else: conditions = None if conditions: # We don't want duplicated field values names = ['volume_type_id', 'availability_zone', 'host', 'cluster_name'] for name in names: values.pop(name, None) fields = [getattr(cg_model, name) for name in names] fields.extend(bindparam(k, v) for k, v in values.items()) sel = session.query(*fields).filter(*conditions) names.extend(values.keys()) insert_stmt = cg_model.__table__.insert().from_select(names, sel) result = session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise # the right exception if not result.rowcount: if cg_id: raise exception.ConsistencyGroupNotFound( consistencygroup_id=cg_id) raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_snap_id) else: consistencygroup = cg_model() consistencygroup.update(values) session.add(consistencygroup) return _consistencygroup_get(context, values['id'], session=session) @handle_db_data_error @require_context def consistencygroup_update(context, consistencygroup_id, values): query = model_query(context, models.ConsistencyGroup, project_only=True) result = query.filter_by(id=consistencygroup_id).update(values) if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id) @require_admin_context 
def consistencygroup_destroy(context, consistencygroup_id):
    """Soft-delete a consistency group.

    :returns: the values the row was updated with (without 'updated_at',
              which is stripped because it was only a literal_column marker)
    """
    utcnow = timeutils.utcnow()
    session = get_session()
    with session.begin():
        updated_values = {'status': fields.ConsistencyGroupStatus.DELETED,
                          'deleted': True,
                          'deleted_at': utcnow,
                          'updated_at': literal_column('updated_at')}
        # Reuse updated_values for the UPDATE instead of repeating the same
        # dict literal, matching cgsnapshot_destroy/group_snapshot_destroy.
        model_query(context, models.ConsistencyGroup, session=session).\
            filter_by(id=consistencygroup_id).\
            update(updated_values)
    del updated_values['updated_at']
    return updated_values


@require_admin_context
def cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids,
                                     volume_ids, snapshot_ids, session):
    """Detach and soft-delete CG related rows by their ids.

    Snapshots and volumes are detached from their (cg)snapshot/CG; CG
    snapshots and CGs themselves are deleted.  All work happens on the
    caller-supplied session.
    """
    utcnow = timeutils.utcnow()
    if snapshot_ids:
        snaps = (model_query(context, models.Snapshot, session=session,
                             read_deleted="no").
                 filter(models.Snapshot.id.in_(snapshot_ids)).
                 all())
        for snap in snaps:
            snap.update({'cgsnapshot_id': None,
                         'updated_at': utcnow})

    if cgsnapshot_ids:
        cg_snaps = (model_query(context, models.CGSnapshot, session=session,
                                read_deleted="no").
                    filter(models.CGSnapshot.id.in_(cgsnapshot_ids)).
                    all())

        for cg_snap in cg_snaps:
            cg_snap.delete(session=session)

    if volume_ids:
        vols = (model_query(context, models.Volume, session=session,
                            read_deleted="no").
                filter(models.Volume.id.in_(volume_ids)).
                all())

        for vol in vols:
            vol.update({'consistencygroup_id': None,
                        'updated_at': utcnow})

    if cg_ids:
        cgs = (model_query(context, models.ConsistencyGroup, session=session,
                           read_deleted="no").
               filter(models.ConsistencyGroup.id.in_(cg_ids)).
               all())

        for cg in cgs:
            cg.delete(session=session)


def cg_has_cgsnapshot_filter():
    """Return a filter that checks if a CG has CG Snapshots."""
    return sql.exists().where(and_(
        models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id,
        ~models.CGSnapshot.deleted))


def cg_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a CG has volumes.

    When attached_or_with_snapshots parameter is given a True value only
    attached volumes or those with snapshots will be considered.
    """
    query = sql.exists().where(
        and_(models.Volume.consistencygroup_id ==
             models.ConsistencyGroup.id,
             ~models.Volume.deleted))

    if attached_or_with_snapshots:
        query = query.where(or_(
            models.Volume.attach_status == 'attached',
            sql.exists().where(
                and_(models.Volume.id == models.Snapshot.volume_id,
                     ~models.Snapshot.deleted))))
    return query


def cg_creating_from_src(cg_id=None, cgsnapshot_id=None):
    """Return a filter to check if a CG is being used as creation source.

    Returned filter is meant to be used in the Conditional Update mechanism
    and checks if provided CG ID or CG Snapshot ID is currently being used to
    create another CG.

    This filter will not include CGs that have used the ID but have already
    finished their creation (status is no longer creating).

    Filter uses a subquery that allows it to be used on updates to the
    consistencygroups table.
    """
    # NOTE(geguileo): As explained in devref api_conditional_updates we use a
    # subquery to trick MySQL into using the same table in the update and the
    # where clause.
    subq = sql.select([models.ConsistencyGroup]).where(
        and_(~models.ConsistencyGroup.deleted,
             models.ConsistencyGroup.status == 'creating')).alias('cg2')

    if cg_id:
        match_id = subq.c.source_cgid == cg_id
    elif cgsnapshot_id:
        match_id = subq.c.cgsnapshot_id == cgsnapshot_id
    else:
        msg = _('cg_creating_from_src must be called with cg_id or '
                'cgsnapshot_id parameter.')
        raise exception.ProgrammingError(reason=msg)

    return sql.exists([subq]).where(match_id)


@require_admin_context
def consistencygroup_include_in_cluster(context, cluster,
                                        partial_rename=True, **filters):
    """Include all consistency groups matching the filters into a cluster."""
    return _include_in_cluster(context, cluster, models.ConsistencyGroup,
                               partial_rename, filters)


@require_admin_context
def group_include_in_cluster(context, cluster, partial_rename=True,
                             **filters):
    """Include all generic groups matching the filters into a cluster."""
    return _include_in_cluster(context, cluster, models.Group,
                               partial_rename, filters)


###############################


@require_admin_context
def _group_data_get_for_project(context, project_id, session=None):
    """Count the groups owned by a project.

    Returns a 2-tuple whose first element is always 0 (presumably a
    size/gigabytes placeholder mirroring other *_data_get_for_project
    helpers — TODO confirm against callers) and whose second element is the
    group count.
    """
    query = model_query(context,
                        func.count(models.Group.id),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)

    result = query.first()

    return (0, result[0] or 0)


@require_context
def _group_get(context, group_id, session=None):
    """Return the Group row for group_id or raise GroupNotFound."""
    result = (model_query(context, models.Group, session=session,
                          project_only=True).
              filter_by(id=group_id).
              first())

    if not result:
        raise exception.GroupNotFound(group_id=group_id)

    return result


@require_context
def group_get(context, group_id):
    """Return the Group row for group_id or raise GroupNotFound."""
    return _group_get(context, group_id)


def _groups_get_query(context, session=None, project_only=False):
    """Build the base query for groups."""
    return model_query(context, models.Group, session=session,
                       project_only=project_only)


def _group_snapshot_get_query(context, session=None, project_only=False):
    """Build the base query for group snapshots."""
    return model_query(context, models.GroupSnapshot, session=session,
                       project_only=project_only)


@apply_like_filters(model=models.Group)
def _process_groups_filters(query, filters):
    """Apply group filters to query; returns None when a key is invalid."""
    if filters:
        # NOTE(xyang): backend_match_level needs to be handled before
        # is_valid_model_filters is called as it is not a column name
        # in the db.
        backend_match_level = filters.pop('backend_match_level', 'backend')
        # host is a valid filter. Filter the query by host and
        # backend_match_level first.
        host = filters.pop('host', None)
        if host:
            query = query.filter(_filter_host(models.Group.host, host,
                                              match_level=backend_match_level))
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Group, filters):
            return
        query = query.filter_by(**filters)
    return query


@apply_like_filters(model=models.GroupSnapshot)
def _process_group_snapshot_filters(query, filters):
    """Apply snapshot filters to query; returns None when a key is invalid."""
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.GroupSnapshot, filters):
            return
        query = query.filter_by(**filters)
    return query


def _group_get_all(context, filters=None, marker=None, limit=None,
                   offset=None, sort_keys=None, sort_dirs=None):
    """Paginated group fetch shared by the public group_get_all* calls."""
    # No need to call is_valid_model_filters here. It is called
    # in _process_group_filters when _generate_paginate_query
    # is called below.
    session = get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, models.Group)

    return query.all() if query else []


@require_admin_context
def group_get_all(context, filters=None, marker=None, limit=None,
                  offset=None, sort_keys=None, sort_dirs=None):
    """Retrieves all groups.

    If no sort parameters are specified then the returned groups are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching groups
    """
    return _group_get_all(context, filters, marker, limit, offset,
                          sort_keys, sort_dirs)


@require_context
def group_get_all_by_project(context, project_id, filters=None,
                             marker=None, limit=None, offset=None,
                             sort_keys=None, sort_dirs=None):
    """Retrieves all groups in a project.

    If no sort parameters are specified then the returned groups are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching groups
    """
    authorize_project_context(context, project_id)
    if not filters:
        filters = {}
    else:
        # Copy so the caller's dict is not mutated by the project_id we add.
        filters = filters.copy()

    filters['project_id'] = project_id
    return _group_get_all(context, filters, marker, limit, offset,
                          sort_keys, sort_dirs)


@handle_db_data_error
@require_context
def group_create(context, values, group_snapshot_id=None,
                 source_group_id=None):
    """Create a generic group from the values dictionary.

    :param values: column values; 'id' is generated when not supplied and
                   'volume_type_ids' is split out into mapping rows
    :param group_snapshot_id: when set, group_type_id/availability_zone/host
                              are copied from the group owning this snapshot
    :param source_group_id: when set, those fields are copied from this group
    :raises GroupNotFound: if source_group_id does not match an existing group
    :raises GroupSnapshotNotFound: if group_snapshot_id does not match an
                                   existing group snapshot
    :returns: the created Group model object
    """
    group_model = models.Group

    values = values.copy()
    if not values.get('id'):
        values['id'] = six.text_type(uuid.uuid4())

    session = get_session()
    with session.begin():
        if group_snapshot_id:
            conditions = [group_model.id == models.GroupSnapshot.group_id,
                          models.GroupSnapshot.id == group_snapshot_id]
        elif source_group_id:
            conditions = [group_model.id == source_group_id]
        else:
            conditions = None

        if conditions:
            # We don't want duplicated field values
            values.pop('group_type_id', None)
            values.pop('availability_zone', None)
            values.pop('host', None)
            # NOTE(xyang): Save volume_type_ids to update later.
            volume_type_ids = values.pop('volume_type_ids', [])

            # INSERT ... FROM SELECT copies the shared fields straight from
            # the source row, so the existence check and the copy happen in
            # one atomic statement.
            sel = session.query(group_model.group_type_id,
                                group_model.availability_zone,
                                group_model.host,
                                *(bindparam(k, v) for k, v in values.items())
                                ).filter(*conditions)
            names = ['group_type_id', 'availability_zone', 'host']
            names.extend(values.keys())
            insert_stmt = group_model.__table__.insert().from_select(
                names, sel)
            result = session.execute(insert_stmt)
            # If we couldn't insert the row because of the conditions raise
            # the right exception
            if not result.rowcount:
                if source_group_id:
                    raise exception.GroupNotFound(
                        group_id=source_group_id)
                raise exception.GroupSnapshotNotFound(
                    group_snapshot_id=group_snapshot_id)

            for item in volume_type_ids:
                mapping = models.GroupVolumeTypeMapping()
                mapping['volume_type_id'] = item
                mapping['group_id'] = values['id']
                session.add(mapping)
        else:
            for item in values.get('volume_type_ids') or []:
                mapping = models.GroupVolumeTypeMapping()
                mapping['volume_type_id'] = item
                mapping['group_id'] = values['id']
                session.add(mapping)
            group = group_model()
            group.update(values)
            session.add(group)

        return _group_get(context, values['id'], session=session)


@handle_db_data_error
@require_context
def group_volume_type_mapping_create(context, group_id, volume_type_id):
    """Add group volume_type mapping entry."""
    # Verify group exists
    _group_get(context, group_id)
    # Verify volume type exists
    _volume_type_get_id_from_volume_type(context, volume_type_id)

    existing = _group_volume_type_mapping_get_all_by_group_volume_type(
        context, group_id, volume_type_id)
    if existing:
        raise exception.GroupVolumeTypeMappingExists(
            group_id=group_id,
            volume_type_id=volume_type_id)

    mapping = models.GroupVolumeTypeMapping()
    mapping.update({"group_id": group_id,
                    "volume_type_id": volume_type_id})

    session = get_session()
    with session.begin():
        try:
            mapping.save(session=session)
        except db_exc.DBDuplicateEntry:
            # Lost a race with a concurrent insert of the same mapping.
            raise exception.GroupVolumeTypeMappingExists(
                group_id=group_id,
                volume_type_id=volume_type_id)
        return mapping


@handle_db_data_error
@require_context
def group_update(context, group_id, values):
    """Update a group row with the given values.

    :raises GroupNotFound: if no visible row was updated
    """
    query = model_query(context, models.Group, project_only=True)
    result = query.filter_by(id=group_id).update(values)
    if not result:
        raise exception.GroupNotFound(group_id=group_id)


@require_admin_context
def group_destroy(context, group_id):
    """Soft-delete a group and its volume-type mapping rows."""
    session = get_session()
    with session.begin():
        (model_query(context, models.Group, session=session).
         filter_by(id=group_id).
         update({'status': fields.GroupStatus.DELETED,
                 'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))

        (session.query(models.GroupVolumeTypeMapping).
         filter_by(group_id=group_id).
         update({'deleted': True,
                 'deleted_at': timeutils.utcnow(),
                 'updated_at': literal_column('updated_at')}))


def group_has_group_snapshot_filter():
    """Return a filter that checks if a group has group snapshots."""
    return sql.exists().where(and_(
        models.GroupSnapshot.group_id == models.Group.id,
        ~models.GroupSnapshot.deleted))


def group_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a group has volumes.

    When attached_or_with_snapshots is True only attached volumes or those
    with snapshots are considered.
    """
    query = sql.exists().where(
        and_(models.Volume.group_id == models.Group.id,
             ~models.Volume.deleted))

    if attached_or_with_snapshots:
        query = query.where(or_(
            models.Volume.attach_status == 'attached',
            sql.exists().where(
                and_(models.Volume.id == models.Snapshot.volume_id,
                     ~models.Snapshot.deleted))))
    return query


def group_creating_from_src(group_id=None, group_snapshot_id=None):
    """Return a filter to check if a group is being used as creation source.

    Generic-group counterpart of cg_creating_from_src: checks whether the
    given group ID or group snapshot ID is currently being used to create
    another group (status still 'creating').
    """
    # NOTE(geguileo): As explained in devref api_conditional_updates we use a
    # subquery to trick MySQL into using the same table in the update and the
    # where clause.
    subq = sql.select([models.Group]).where(
        and_(~models.Group.deleted,
             models.Group.status == 'creating')).alias('group2')

    if group_id:
        match_id = subq.c.source_group_id == group_id
    elif group_snapshot_id:
        match_id = subq.c.group_snapshot_id == group_snapshot_id
    else:
        msg = _('group_creating_from_src must be called with group_id or '
                'group_snapshot_id parameter.')
        raise exception.ProgrammingError(reason=msg)

    return sql.exists([subq]).where(match_id)


###############################


@require_context
def _cgsnapshot_get(context, cgsnapshot_id, session=None):
    """Return the CGSnapshot row or raise CgSnapshotNotFound."""
    result = model_query(context, models.CGSnapshot, session=session,
                         project_only=True).\
        filter_by(id=cgsnapshot_id).\
        first()

    if not result:
        raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)

    return result


@require_context
def cgsnapshot_get(context, cgsnapshot_id):
    """Return the CGSnapshot row or raise CgSnapshotNotFound."""
    return _cgsnapshot_get(context, cgsnapshot_id)


def is_valid_model_filters(model, filters, exclude_list=None):
    """Return True if filter values exist on the model

    :param model: a Cinder model
    :param filters: dictionary of filters
    """
    for key in filters.keys():
        if exclude_list and key in exclude_list:
            continue
        if key == 'metadata':
            if not isinstance(filters[key], dict):
                LOG.debug("Metadata filter value is not valid dictionary")
                return False
            continue
        try:
            # Strip the like-filter marker suffix before checking the column
            # exists (see apply_like_filters).
            key = key.rstrip('~')
            getattr(model, key)
        except AttributeError:
            LOG.debug("'%s' filter key is not valid.", key)
            return False
    return True


def _cgsnapshot_get_all(context, project_id=None, group_id=None,
                        filters=None):
    """Fetch CG snapshots, optionally restricted by project and/or CG."""
    query = model_query(context, models.CGSnapshot)

    if filters:
        if not is_valid_model_filters(models.CGSnapshot, filters):
            return []
        query = query.filter_by(**filters)

    if project_id:
        query = query.filter_by(project_id=project_id)

    if group_id:
        query = query.filter_by(consistencygroup_id=group_id)

    return query.all()


@require_admin_context
def cgsnapshot_get_all(context, filters=None):
    """Return all CG snapshots matching the filters."""
    return _cgsnapshot_get_all(context, filters=filters)


@require_admin_context
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
    """Return all CG snapshots belonging to a consistency group."""
    return _cgsnapshot_get_all(context, group_id=group_id, filters=filters)


@require_context
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
    """Return all CG snapshots belonging to a project."""
    authorize_project_context(context, project_id)
    return _cgsnapshot_get_all(context, project_id=project_id,
                               filters=filters)


@handle_db_data_error
@require_context
def cgsnapshot_create(context, values):
    """Create a CG snapshot from the values dictionary.

    When 'consistencygroup_id' is present the insert is conditional: the
    source CG must exist, contain at least one volume and not be in
    'creating'/'updating' state.
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    cg_id = values.get('consistencygroup_id')
    session = get_session()
    model = models.CGSnapshot
    with session.begin():
        if cg_id:
            # There has to exist at least 1 volume in the CG and the CG cannot
            # be updating the composing volumes or being created.
            conditions = [
                sql.exists().where(and_(
                    ~models.Volume.deleted,
                    models.Volume.consistencygroup_id == cg_id)),
                ~models.ConsistencyGroup.deleted,
                models.ConsistencyGroup.id == cg_id,
                ~models.ConsistencyGroup.status.in_(('creating', 'updating'))]

            # NOTE(geguileo): We build a "fake" from_select clause instead of
            # using transaction isolation on the session because we would need
            # SERIALIZABLE level and that would have a considerable performance
            # penalty.
            binds = (bindparam(k, v) for k, v in values.items())
            sel = session.query(*binds).filter(*conditions)
            insert_stmt = model.__table__.insert().from_select(values.keys(),
                                                               sel)
            result = session.execute(insert_stmt)
            # If we couldn't insert the row because of the conditions raise
            # the right exception
            if not result.rowcount:
                msg = _("Source CG cannot be empty or in 'creating' or "
                        "'updating' state. No cgsnapshot will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)
        else:
            cgsnapshot = model()
            cgsnapshot.update(values)
            session.add(cgsnapshot)

        return _cgsnapshot_get(context, values['id'], session=session)


@require_context
@handle_db_data_error
def cgsnapshot_update(context, cgsnapshot_id, values):
    """Update a CG snapshot row with the given values.

    :raises CgSnapshotNotFound: if no visible row was updated
    """
    query = model_query(context, models.CGSnapshot, project_only=True)
    result = query.filter_by(id=cgsnapshot_id).update(values)
    if not result:
        raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)


@require_admin_context
def cgsnapshot_destroy(context, cgsnapshot_id):
    """Soft-delete a CG snapshot.

    :returns: the values the row was updated with (minus 'updated_at')
    """
    session = get_session()
    with session.begin():
        updated_values = {'status': 'deleted',
                          'deleted': True,
                          'deleted_at': timeutils.utcnow(),
                          'updated_at': literal_column('updated_at')}
        model_query(context, models.CGSnapshot, session=session).\
            filter_by(id=cgsnapshot_id).\
            update(updated_values)
    del updated_values['updated_at']
    return updated_values


def cgsnapshot_creating_from_src():
    """Get a filter that checks if a CGSnapshot is being created from a CG."""
    return sql.exists().where(and_(
        models.CGSnapshot.consistencygroup_id ==
        models.ConsistencyGroup.id,
        ~models.CGSnapshot.deleted,
        models.CGSnapshot.status == 'creating'))


###############################


@require_context
def _group_snapshot_get(context, group_snapshot_id, session=None):
    """Return the GroupSnapshot row or raise GroupSnapshotNotFound."""
    result = model_query(context, models.GroupSnapshot, session=session,
                         project_only=True).\
        filter_by(id=group_snapshot_id).\
        first()

    if not result:
        raise exception.GroupSnapshotNotFound(
            group_snapshot_id=group_snapshot_id)

    return result


@require_context
def group_snapshot_get(context, group_snapshot_id):
    """Return the GroupSnapshot row or raise GroupSnapshotNotFound."""
    return _group_snapshot_get(context, group_snapshot_id)


def _group_snapshot_get_all(context, filters=None, marker=None, limit=None,
                            offset=None, sort_keys=None, sort_dirs=None):
    """Paginated fetch shared by the public group_snapshot_get_all* calls."""
    if filters and not is_valid_model_filters(models.GroupSnapshot,
                                              filters):
        return []

    session = get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, models.GroupSnapshot)

    return query.all() if query else []


@require_admin_context
def group_snapshot_get_all(context, filters=None, marker=None, limit=None,
                           offset=None, sort_keys=None, sort_dirs=None):
    """Return all group snapshots matching the filters (paginated)."""
    return _group_snapshot_get_all(context, filters, marker, limit, offset,
                                   sort_keys, sort_dirs)


@require_admin_context
def group_snapshot_get_all_by_group(context, group_id, filters=None,
                                    marker=None, limit=None, offset=None,
                                    sort_keys=None, sort_dirs=None):
    """Return all group snapshots belonging to a group (paginated)."""
    if filters is None:
        filters = {}
    if group_id:
        filters['group_id'] = group_id
    return _group_snapshot_get_all(context, filters, marker, limit, offset,
                                   sort_keys, sort_dirs)


@require_context
def group_snapshot_get_all_by_project(context, project_id, filters=None,
                                      marker=None, limit=None, offset=None,
                                      sort_keys=None, sort_dirs=None):
    """Return all group snapshots belonging to a project (paginated)."""
    authorize_project_context(context, project_id)
    if filters is None:
        filters = {}
    if project_id:
        filters['project_id'] = project_id
    return _group_snapshot_get_all(context, filters, marker, limit, offset,
                                   sort_keys, sort_dirs)


@handle_db_data_error
@require_context def group_snapshot_create(context, values): if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) group_id = values.get('group_id') session = get_session() model = models.GroupSnapshot with session.begin(): if group_id: # There has to exist at least 1 volume in the group and the group # cannot be updating the composing volumes or being created. conditions = [ sql.exists().where(and_( ~models.Volume.deleted, models.Volume.group_id == group_id)), ~models.Group.deleted, models.Group.id == group_id, ~models.Group.status.in_(('creating', 'updating'))] # NOTE(geguileo): We build a "fake" from_select clause instead of # using transaction isolation on the session because we would need # SERIALIZABLE level and that would have a considerable performance # penalty. binds = (bindparam(k, v) for k, v in values.items()) sel = session.query(*binds).filter(*conditions) insert_stmt = model.__table__.insert().from_select(values.keys(), sel) result = session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise # the right exception if not result.rowcount: msg = _("Source group cannot be empty or in 'creating' or " "'updating' state. 
No group snapshot will be created.") raise exception.InvalidGroup(reason=msg) else: group_snapshot = model() group_snapshot.update(values) session.add(group_snapshot) return _group_snapshot_get(context, values['id'], session=session) @require_context @handle_db_data_error def group_snapshot_update(context, group_snapshot_id, values): session = get_session() with session.begin(): result = model_query(context, models.GroupSnapshot, project_only=True).\ filter_by(id=group_snapshot_id).\ first() if not result: raise exception.GroupSnapshotNotFound( _("No group snapshot with id %s") % group_snapshot_id) result.update(values) result.save(session=session) return result @require_admin_context def group_snapshot_destroy(context, group_snapshot_id): session = get_session() with session.begin(): updated_values = {'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')} model_query(context, models.GroupSnapshot, session=session).\ filter_by(id=group_snapshot_id).\ update(updated_values) del updated_values['updated_at'] return updated_values def group_snapshot_creating_from_src(): """Get a filter to check if a grp snapshot is being created from a grp.""" return sql.exists().where(and_( models.GroupSnapshot.group_id == models.Group.id, ~models.GroupSnapshot.deleted, models.GroupSnapshot.status == 'creating')) ############################### @require_admin_context def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than age from cinder tables.""" try: age_in_days = int(age_in_days) except ValueError: msg = _('Invalid value for age, %(age)s') % {'age': age_in_days} LOG.exception(msg) raise exception.InvalidParameterValue(msg) engine = get_engine() session = get_session() metadata = MetaData() metadata.reflect(engine) for table in reversed(metadata.sorted_tables): if 'deleted' not in table.columns.keys(): continue LOG.info('Purging deleted rows older than age=%(age)d days ' 'from table=%(table)s', 
                 {'age': age_in_days, 'table': table})
        deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
        try:
            with session.begin():
                # Delete child records first from quality_of_service_specs
                # table to avoid FK constraints
                if six.text_type(table) == "quality_of_service_specs":
                    session.query(models.QualityOfServiceSpecs).filter(
                        and_(models.QualityOfServiceSpecs.specs_id.isnot(
                            None), models.QualityOfServiceSpecs.
                            deleted.is_(True),
                            models.QualityOfServiceSpecs.
                            deleted_at < deleted_age)).delete()
                result = session.execute(
                    table.delete()
                    .where(table.c.deleted_at < deleted_age))
        except db_exc.DBReferenceError as ex:
            LOG.error('DBError detected when purging from '
                      '%(tablename)s: %(error)s.',
                      {'tablename': table, 'error': ex})
            raise

        rows_purged = result.rowcount
        if rows_purged != 0:
            LOG.info("Deleted %(row)d rows from table=%(table)s",
                     {'row': rows_purged, 'table': table})


###############################


@require_admin_context
def reset_active_backend(context, enable_replication, active_backend_id,
                         backend_host):
    """Reset a frozen replicated backend to its default (enabled) state.

    The cinder-volume service for backend_host must exist (it is looked up
    disabled) and must be frozen.  NOTE(review): enable_replication and
    active_backend_id are not referenced in this body — presumably kept for
    API symmetry with failover calls; confirm against callers.

    :raises ServiceUnavailable: if the service is not frozen
    """
    service = objects.Service.get_by_host_and_topic(context,
                                                    backend_host,
                                                    'cinder-volume',
                                                    disabled=True)
    if not service.frozen:
        raise exception.ServiceUnavailable(
            'Service for host %(host)s must first be frozen.'
            % {'host': backend_host})

    actions = {
        'disabled': False,
        'disabled_reason': '',
        'active_backend_id': None,
        'replication_status': 'enabled',
    }
    expectations = {
        'frozen': True,
        'disabled': True,
    }
    if service.is_clustered:
        service.cluster.conditional_update(actions, expectations)
        service.cluster.reset_service_replication()
    else:
        service.conditional_update(actions, expectations)


###############################


def _translate_messages(messages):
    """Translate a list of Message models to plain dicts."""
    return [_translate_message(message) for message in messages]


def _translate_message(message):
    """Translate the Message model to a dict."""
    return {
        'id': message['id'],
        'project_id': message['project_id'],
        'request_id': message['request_id'],
        'resource_type': message['resource_type'],
        'resource_uuid': message.get('resource_uuid'),
        'event_id': message['event_id'],
        'detail_id': message['detail_id'],
        'action_id': message['action_id'],
        'message_level': message['message_level'],
        'created_at': message['created_at'],
        'expires_at': message.get('expires_at'),
    }


def _message_get(context, message_id, session=None):
    """Return the Message row or raise MessageNotFound."""
    query = model_query(context,
                        models.Message,
                        read_deleted="no",
                        project_only="yes",
                        session=session)
    result = query.filter_by(id=message_id).first()
    if not result:
        raise exception.MessageNotFound(message_id=message_id)
    return result


@require_context
def message_get(context, message_id, session=None):
    """Return a user message as a dict or raise MessageNotFound."""
    result = _message_get(context, message_id, session)
    return _translate_message(result)


@require_context
def message_get_all(context, filters=None, marker=None, limit=None,
                    offset=None, sort_keys=None, sort_dirs=None):
    """Retrieves all messages.

    If no sort parameters are specified then the returned messages are
    sorted first by the 'created_at' key and then by the 'id' key in
    descending order.
    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see _process_messages_filters
                    function for more information
    :returns: list of matching messages
    """
    messages = models.Message

    session = get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, messages)
        if query is None:
            return []
        results = query.all()
        return _translate_messages(results)


@apply_like_filters(model=models.Message)
def _process_messages_filters(query, filters):
    """Apply message filters to query; returns None when a key is invalid."""
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Message, filters):
            return None
        query = query.filter_by(**filters)
    return query


def _messages_get_query(context, session=None, project_only=False):
    """Build the base query for messages."""
    return model_query(context, models.Message, session=session,
                       project_only=project_only)


@require_context
def message_create(context, values):
    """Insert a user message row; an 'id' is generated when not supplied.

    NOTE(review): the created row is not returned to the caller.
    """
    message_ref = models.Message()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    message_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(message_ref)


@require_admin_context
def message_destroy(context, message):
    """Soft-delete a user message.

    :returns: the values the row was updated with (minus 'updated_at')
    """
    session = get_session()
    now = timeutils.utcnow()
    with session.begin():
        updated_values = {'deleted': True,
                          'deleted_at': now,
                          'updated_at': literal_column('updated_at')}
        (model_query(context, models.Message, session=session).
         filter_by(id=message.get('id')).
        (_attachment_get_query, _process_attachment_filters,
         _attachment_get),
    models.Transfer: (_transfer_get_query, _process_transfer_filters,
                      _transfer_get),
}


# Maps resource name to (base-query builder, filter processor) used when
# counting resources.
CALCULATE_COUNT_HELPERS = {
    'volume': (_volume_get_query, _process_volume_filters),
    'snapshot': (_snaps_get_query, _process_snaps_filters),
    'backup': (_backups_get_query, _process_backups_filters),
}


###############################


@require_context
def image_volume_cache_create(context, host, cluster_name, image_id,
                              image_updated_at, volume_id, size):
    """Insert an image-volume cache entry and return it."""
    session = get_session()
    with session.begin():
        cache_entry = models.ImageVolumeCacheEntry()
        cache_entry.host = host
        cache_entry.cluster_name = cluster_name
        cache_entry.image_id = image_id
        cache_entry.image_updated_at = image_updated_at
        cache_entry.volume_id = volume_id
        cache_entry.size = size
        session.add(cache_entry)
        return cache_entry


@require_context
def image_volume_cache_delete(context, volume_id):
    """Hard-delete the cache entry backed by the given volume."""
    session = get_session()
    with session.begin():
        session.query(models.ImageVolumeCacheEntry).\
            filter_by(volume_id=volume_id).\
            delete()


@require_context
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
    """Return the most recently used entry for image_id, bumping last_used.

    Returns None when no entry matches.
    """
    filters = _clean_filters(filters)
    session = get_session()
    with session.begin():
        entry = session.query(models.ImageVolumeCacheEntry).\
            filter_by(image_id=image_id).\
            filter_by(**filters).\
            order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
            first()

        if entry:
            entry.last_used = timeutils.utcnow()
            entry.save(session=session)
        return entry


@require_context
def image_volume_cache_get_by_volume_id(context, volume_id):
    """Return the cache entry backed by the given volume, or None."""
    session = get_session()
    with session.begin():
        return session.query(models.ImageVolumeCacheEntry).\
            filter_by(volume_id=volume_id).\
            first()


@require_context
def image_volume_cache_get_all(context, **filters):
    """Return all cache entries matching filters, most recently used first."""
    filters = _clean_filters(filters)
    session = get_session()
    with session.begin():
        return session.query(models.ImageVolumeCacheEntry).\
            filter_by(**filters).\
            order_by(desc(models.ImageVolumeCacheEntry.last_used)).\
            all()
@require_admin_context def image_volume_cache_include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all volumes matching the filters into a cluster.""" filters = _clean_filters(filters) return _include_in_cluster(context, cluster, models.ImageVolumeCacheEntry, partial_rename, filters) ################### def _worker_query(context, session=None, until=None, db_filters=None, ignore_sentinel=True, **filters): # Remove all filters based on the workers table that are set to None filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Worker, filters): return None query = model_query(context, models.Worker, session=session) # TODO(geguileo): Once we remove support for MySQL 5.5 we can remove this if ignore_sentinel: # We don't want to retrieve the workers sentinel query = query.filter(models.Worker.resource_type != 'SENTINEL') if until: db_filters = list(db_filters) if db_filters else [] # Since we set updated_at at creation time we don't need to check # created_at field. db_filters.append(models.Worker.updated_at <= until) if db_filters: query = query.filter(and_(*db_filters)) if filters: query = query.filter_by(**filters) return query DB_SUPPORTS_SUBSECOND_RESOLUTION = True def workers_init(): """Check if DB supports subsecond resolution and set global flag. MySQL 5.5 doesn't support subsecond resolution in datetime fields, so we have to take it into account when working with the worker's table. To do this we'll have 1 row in the DB, created by the migration script, where we have tried to set the microseconds and we'll check it. Once we drop support for MySQL 5.5 we can remove this method. 
""" global DB_SUPPORTS_SUBSECOND_RESOLUTION session = get_session() query = session.query(models.Worker).filter_by(resource_type='SENTINEL') worker = query.first() DB_SUPPORTS_SUBSECOND_RESOLUTION = bool(worker.updated_at.microsecond) def _worker_set_updated_at_field(values): # TODO(geguileo): Once we drop support for MySQL 5.5 we can simplify this # method. updated_at = values.get('updated_at', timeutils.utcnow()) if isinstance(updated_at, six.string_types): return if not DB_SUPPORTS_SUBSECOND_RESOLUTION: updated_at = updated_at.replace(microsecond=0) values['updated_at'] = updated_at @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def worker_create(context, **values): """Create a worker entry from optional arguments.""" _worker_set_updated_at_field(values) worker = models.Worker(**values) session = get_session() try: with session.begin(): worker.save(session) except db_exc.DBDuplicateEntry: raise exception.WorkerExists(type=values.get('resource_type'), id=values.get('resource_id')) return worker def worker_get(context, **filters): """Get a worker or raise exception if it does not exist.""" query = _worker_query(context, **filters) worker = query.first() if query else None if not worker: raise exception.WorkerNotFound(**filters) return worker def worker_get_all(context, **filters): """Get all workers that match given criteria.""" query = _worker_query(context, **filters) return query.all() if query else [] def _orm_worker_update(worker, values): if not worker: return for key, value in values.items(): setattr(worker, key, value) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def worker_update(context, id, filters=None, orm_worker=None, **values): """Update a worker with given values.""" filters = filters or {} query = _worker_query(context, id=id, **filters) # If we want to update the orm_worker and we don't set the update_at field # we set it here instead of letting SQLAlchemy do it to be able to update # the orm_worker. 
_worker_set_updated_at_field(values) reference = orm_worker or models.Worker values['race_preventer'] = reference.race_preventer + 1 result = query.update(values) if not result: raise exception.WorkerNotFound(id=id, **filters) _orm_worker_update(orm_worker, values) return result @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def worker_claim_for_cleanup(context, claimer_id, orm_worker): """Claim a worker entry for cleanup.""" # We set updated_at value so we are sure we update the DB entry even if the # service_id is the same in the DB, thus flagging the claim. values = {'service_id': claimer_id, 'race_preventer': orm_worker.race_preventer + 1, 'updated_at': timeutils.utcnow()} _worker_set_updated_at_field(values) # We only update the worker entry if it hasn't been claimed by other host # or thread query = _worker_query(context, status=orm_worker.status, service_id=orm_worker.service_id, race_preventer=orm_worker.race_preventer, until=orm_worker.updated_at, id=orm_worker.id) result = query.update(values, synchronize_session=False) if result: _orm_worker_update(orm_worker, values) return result def worker_destroy(context, **filters): """Delete a worker (no soft delete).""" query = _worker_query(context, **filters) return query.delete() ############################### @require_context def resource_exists(context, model, resource_id, session=None): conditions = [model.id == resource_id] # Match non deleted resources by the id if 'no' == context.read_deleted: conditions.append(~model.deleted) # If the context is not admin we limit it to the context's project if is_user_context(context) and hasattr(model, 'project_id'): conditions.append(model.project_id == context.project_id) session = session or get_session() query = session.query(sql.exists().where(and_(*conditions))) return query.scalar() def get_model_for_versioned_object(versioned_object): if isinstance(versioned_object, six.string_types): model_name = versioned_object else: model_name = 
versioned_object.obj_name() if model_name == 'BackupImport': return models.Backup return getattr(models, model_name) def _get_get_method(model): # Exceptions to model to get methods, in general method names are a simple # conversion changing ORM name from camel case to snake format and adding # _get to the string GET_EXCEPTIONS = { models.ConsistencyGroup: consistencygroup_get, models.VolumeType: _volume_type_get_full, models.QualityOfServiceSpecs: qos_specs_get, models.GroupType: _group_type_get_full, models.CGSnapshot: cgsnapshot_get, } if model in GET_EXCEPTIONS: return GET_EXCEPTIONS[model] # General conversion # Convert camel cased model name to snake format s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__) # Get method must be snake formatted model name concatenated with _get method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get' return globals().get(method_name) _GET_METHODS = {} @require_context def get_by_id(context, model, id, *args, **kwargs): # Add get method to cache dictionary if it's not already there if not _GET_METHODS.get(model): _GET_METHODS[model] = _get_get_method(model) return _GET_METHODS[model](context, id, *args, **kwargs) def condition_db_filter(model, field, value): """Create matching filter. If value is an iterable other than a string, any of the values is a valid match (OR), so we'll use SQL IN operator. If it's not an iterator == operator will be used. """ orm_field = getattr(model, field) # For values that must match and are iterables we use IN if (isinstance(value, abc.Iterable) and not isinstance(value, six.string_types)): # We cannot use in_ when one of the values is None if None not in value: return orm_field.in_(value) return or_(orm_field == v for v in value) # For values that must match and are not iterables we use == return orm_field == value def condition_not_db_filter(model, field, value, auto_none=True): """Create non matching filter. 
If value is an iterable other than a string, any of the values is a valid match (OR), so we'll use SQL IN operator. If it's not an iterator == operator will be used. If auto_none is True then we'll consider NULL values as different as well, like we do in Python and not like SQL does. """ result = ~condition_db_filter(model, field, value) if (auto_none and ((isinstance(value, abc.Iterable) and not isinstance(value, six.string_types) and None not in value) or (value is not None))): orm_field = getattr(model, field) result = or_(result, orm_field.is_(None)) return result def is_orm_value(obj): """Check if object is an ORM field or expression.""" return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, sqlalchemy.sql.expression.ColumnElement)) def _check_is_not_multitable(values, model): """Check that we don't try to do multitable updates. Since PostgreSQL doesn't support multitable updates we want to always fail if we have such a query in our code, even if with MySQL it would work. 
""" used_models = set() for field in values: if isinstance(field, sqlalchemy.orm.attributes.InstrumentedAttribute): used_models.add(field.class_) elif isinstance(field, six.string_types): used_models.add(model) else: raise exception.ProgrammingError( reason='DB Conditional update - Unknown field type, must be ' 'string or ORM field.') if len(used_models) > 1: raise exception.ProgrammingError( reason='DB Conditional update - Error in query, multitable ' 'updates are not supported.') @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def conditional_update(context, model, values, expected_values, filters=(), include_deleted='no', project_only=False, order=None): """Compare-and-swap conditional update SQLAlchemy implementation.""" _check_is_not_multitable(values, model) # Provided filters will become part of the where clause where_conds = list(filters) # Build where conditions with operators ==, !=, NOT IN and IN for field, condition in expected_values.items(): if not isinstance(condition, db.Condition): condition = db.Condition(condition, field) where_conds.append(condition.get_filter(model, field)) # Create the query with the where clause query = model_query(context, model, read_deleted=include_deleted, project_only=project_only).filter(*where_conds) # NOTE(geguileo): Some DBs' update method are order dependent, and they # behave differently depending on the order of the values, example on a # volume with 'available' status: # UPDATE volumes SET previous_status=status, status='reyping' # WHERE id='44f284f9-877d-4fce-9eb4-67a052410054'; # Will result in a volume with 'retyping' status and 'available' # previous_status both on SQLite and MariaDB, but # UPDATE volumes SET status='retyping', previous_status=status # WHERE id='44f284f9-877d-4fce-9eb4-67a052410054'; # Will yield the same result in SQLite but will result in a volume with # status and previous_status set to 'retyping' in MariaDB, which is not # what we want, so order must be 
taken into consideration. # Order for the update will be: # 1- Order specified in argument order # 2- Values that refer to other ORM field (simple and using operations, # like size + 10) # 3- Values that use Case clause (since they may be using fields as well) # 4- All other values order = list(order) if order else tuple() orm_field_list = [] case_list = [] unordered_list = [] for key, value in values.items(): if isinstance(value, db.Case): value = case(value.whens, value.value, value.else_) if key in order: order[order.index(key)] = (key, value) continue # NOTE(geguileo): Check Case first since it's a type of orm value if isinstance(value, sql.elements.Case): value_list = case_list elif is_orm_value(value): value_list = orm_field_list else: value_list = unordered_list value_list.append((key, value)) update_args = {'synchronize_session': False} # If we don't have to enforce any kind of order just pass along the values # dictionary since it will be a little more efficient. if order or orm_field_list or case_list: # If we are doing an update with ordered parameters, we need to add # remaining values to the list values = itertools.chain(order, orm_field_list, case_list, unordered_list) # And we have to tell SQLAlchemy that we want to preserve the order update_args['update_args'] = {'preserve_parameter_order': True} # Return True if we were able to change any DB entry, False otherwise result = query.update(values, **update_args) return 0 != result
37.058622
79
0.626855
import collections from collections import abc import datetime as dt import functools import itertools import re import sys import uuid from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db import options from oslo_db.sqlalchemy import enginefacade from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') import six import sqlalchemy from sqlalchemy import MetaData from sqlalchemy import or_, and_, case from sqlalchemy.orm import joinedload, joinedload_all, undefer_group, load_only from sqlalchemy.orm import RelationshipProperty from sqlalchemy import sql from sqlalchemy.sql.expression import bindparam from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql.expression import true from sqlalchemy.sql import func from sqlalchemy.sql import sqltypes from cinder.api import common from cinder.common import sqlalchemyutils from cinder import db from cinder.db.sqlalchemy import models from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite') main_context_manager = enginefacade.transaction_context() def configure(conf): main_context_manager.configure(**dict(conf.database)) CONF.import_group("profiler", "cinder.service") if CONF.profiler.enabled: if CONF.profiler.trace_sqlalchemy: lambda eng: osprofiler_sqlalchemy.add_tracing(sqlalchemy, eng, "db") def get_engine(use_slave=False): return main_context_manager._factory.get_legacy_facade().get_engine( use_slave=use_slave) def get_session(use_slave=False, **kwargs): return 
main_context_manager._factory.get_legacy_facade().get_session( use_slave=use_slave, **kwargs) def dispose_engine(): get_engine().dispose() _DEFAULT_QUOTA_NAME = 'default' def get_backend(): return sys.modules[__name__] def is_admin_context(context): if not context: raise exception.CinderException( 'Use of empty request context is deprecated') return context.is_admin def is_user_context(context): if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_volume_exists(f): @functools.wraps(f) def wrapper(context, volume_id, *args, **kwargs): if not resource_exists(context, models.Volume, volume_id): raise exception.VolumeNotFound(volume_id=volume_id) return f(context, volume_id, *args, **kwargs) return wrapper def require_snapshot_exists(f): @functools.wraps(f) def wrapper(context, snapshot_id, *args, **kwargs): if not resource_exists(context, models.Snapshot, snapshot_id): raise 
exception.SnapshotNotFound(snapshot_id=snapshot_id) return f(context, snapshot_id, *args, **kwargs) return wrapper def require_backup_exists(f): @functools.wraps(f) def wrapper(context, backup_id, *args, **kwargs): if not resource_exists(context, models.Backup, backup_id): raise exception.BackupNotFound(backup_id=backup_id) return f(context, backup_id, *args, **kwargs) return wrapper def handle_db_data_error(f): def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) return wrapper def model_query(context, model, *args, **kwargs): session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(model, *args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass elif read_deleted == 'only': query = query.filter_by(deleted=True) elif read_deleted == 'int_no': query = query.filter_by(deleted=0) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): if model is models.VolumeAttachment: query = query.filter(models.Volume.project_id == context.project_id) else: query = query.filter_by(project_id=context.project_id) return query def _sync_volumes(context, project_id, session, volume_type_id=None, volume_type_name=None): (volumes, _gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'volumes' if volume_type_name: key += '_' + volume_type_name return {key: volumes} def _sync_snapshots(context, project_id, session, volume_type_id=None, volume_type_name=None): (snapshots, _gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'snapshots' if volume_type_name: key += '_' + volume_type_name return {key: snapshots} 
def _sync_backups(context, project_id, session, volume_type_id=None, volume_type_name=None): (backups, _gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'backups' return {key: backups} def _sync_gigabytes(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, vol_gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'gigabytes' if volume_type_name: key += '_' + volume_type_name if CONF.no_snapshot_gb_quota: return {key: vol_gigs} (_junk, snap_gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: vol_gigs + snap_gigs} def _sync_consistencygroups(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, groups) = _consistencygroup_data_get_for_project( context, project_id, session=session) key = 'consistencygroups' return {key: groups} def _sync_groups(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, groups) = _group_data_get_for_project( context, project_id, session=session) key = 'groups' return {key: groups} def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None, volume_type_name=None): key = 'backup_gigabytes' (_junk, backup_gigs) = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: backup_gigs} QUOTA_SYNC_FUNCTIONS = { '_sync_volumes': _sync_volumes, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, '_sync_consistencygroups': _sync_consistencygroups, '_sync_backups': _sync_backups, '_sync_backup_gigabytes': _sync_backup_gigabytes, '_sync_groups': _sync_groups, } evel is None: if '#' in value: match_level = 'pool' elif '@' in value: match_level = 'backend' else: match_level = 'host' conn_str = CONF.database.connection if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']: 
cmp_value = func.binary(value) like_op = 'LIKE BINARY' else: cmp_value = value like_op = 'LIKE' conditions = [field == cmp_value] if match_level != 'pool': conditions.append(field.op(like_op)(value + '#%')) if match_level == 'host': conditions.append(field.op(like_op)(value + '@%')) return or_(*conditions) def _filter_time_comparison(field, time_filter_dict): conditions = [] for operator in time_filter_dict: filter_time = timeutils.normalize_time(time_filter_dict[operator]) if operator == 'gt': conditions.append(field.op('>')(filter_time)) elif operator == 'gte': conditions.append(field.op('>=')(filter_time)) if operator == 'eq': conditions.append(field.op('=')(filter_time)) elif operator == 'neq': conditions.append(field.op('!=')(filter_time)) if operator == 'lt': conditions.append(field.op('<')(filter_time)) elif operator == 'lte': conditions.append(field.op('<=')(filter_time)) return or_(*conditions) def _clustered_bool_field_filter(query, field_name, filter_value): # itself is disabled/frozen. 
if filter_value is not None: query_filter = or_( and_(models.Service.cluster_name.is_(None), getattr(models.Service, field_name)), and_(models.Service.cluster_name.isnot(None), sql.exists().where(and_( models.Cluster.name == models.Service.cluster_name, models.Cluster.binary == models.Service.binary, ~models.Cluster.deleted, getattr(models.Cluster, field_name))))) if not filter_value: query_filter = ~query_filter query = query.filter(query_filter) return query def _service_query(context, session=None, read_deleted='no', host=None, cluster_name=None, is_up=None, host_or_cluster=None, backend_match_level=None, disabled=None, frozen=None, **filters): filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Service, filters): return None query = model_query(context, models.Service, session=session, read_deleted=read_deleted) # Host and cluster are particular cases of filters, because we must # retrieve not only exact matches (single backend configuration), but also # match those that have the backend defined (multi backend configuration). 
if host: query = query.filter(_filter_host(models.Service.host, host, backend_match_level)) if cluster_name: query = query.filter(_filter_host(models.Service.cluster_name, cluster_name, backend_match_level)) if host_or_cluster: query = query.filter(or_( _filter_host(models.Service.host, host_or_cluster, backend_match_level), _filter_host(models.Service.cluster_name, host_or_cluster, backend_match_level), )) query = _clustered_bool_field_filter(query, 'disabled', disabled) query = _clustered_bool_field_filter(query, 'frozen', frozen) if filters: query = query.filter_by(**filters) if is_up is not None: date_limit = utils.service_expired_time() svc = models.Service filter_ = or_( and_(svc.created_at.isnot(None), svc.created_at >= date_limit), and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit)) query = query.filter(filter_ == is_up) return query @require_admin_context def service_destroy(context, service_id): query = _service_query(context, id=service_id) updated_values = models.Service.delete_values() if not query.update(updated_values): raise exception.ServiceNotFound(service_id=service_id) return updated_values @require_admin_context def service_get(context, service_id=None, backend_match_level=None, **filters): query = _service_query(context, backend_match_level=backend_match_level, id=service_id, **filters) service = None if not query else query.first() if not service: serv_id = service_id or filters.get('topic') or filters.get('binary') raise exception.ServiceNotFound(service_id=serv_id, host=filters.get('host')) return service @require_admin_context def service_get_all(context, backend_match_level=None, **filters): query = _service_query(context, backend_match_level=backend_match_level, **filters) return [] if not query else query.all() @require_admin_context def service_get_by_uuid(context, service_uuid): query = model_query(context, models.Service).filter_by(uuid=service_uuid) result = query.first() if not result: raise 
exception.ServiceNotFound(service_id=service_uuid) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True session = get_session() with session.begin(): service_ref.save(session) return service_ref @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def service_update(context, service_id, values): if 'disabled' in values: values = values.copy() values['modified_at'] = values.get('modified_at', timeutils.utcnow()) values['updated_at'] = values.get('updated_at', literal_column('updated_at')) query = _service_query(context, id=service_id) result = query.update(values) if not result: raise exception.ServiceNotFound(service_id=service_id) @enginefacade.writer def untyped_volumes_online_data_migration(context, max_count): from cinder.volume import volume_types default_type = volume_types.get_volume_type_by_name(context, '__DEFAULT__') # get all volumes having volume_type=None total = 0 updated = 0 session = get_session() with session.begin(): total = model_query(context, models.Volume, session=session).filter_by( volume_type_id=None).limit(max_count).count() volumes = model_query(context, models.Volume, session=session).filter_by( volume_type_id=None).limit(max_count).all() for volume in volumes: volume.volume_type_id = default_type.get('id') updated += 1 return total, updated @enginefacade.writer def untyped_snapshots_online_data_migration(context, max_count): from cinder.volume import volume_types default_type = volume_types.get_volume_type_by_name(context, '__DEFAULT__') # get all snapshots having volume_type=None total = 0 updated = 0 session = get_session() with session.begin(): total = model_query(context, models.Snapshot, session=session).filter_by( volume_type_id=None).limit(max_count).count() snapshots = model_query(context, models.Snapshot, session=session).filter_by( 
volume_type_id=None).limit(max_count).all() for snapshot in snapshots: snapshot.volume_type_id = default_type.get('id') updated += 1 return total, updated ################### @require_admin_context def is_backend_frozen(context, host, cluster_name): if cluster_name: model = models.Cluster conditions = [model.name == volume_utils.extract_host(cluster_name)] else: model = models.Service conditions = [model.host == volume_utils.extract_host(host)] conditions.extend((~model.deleted, model.frozen)) query = get_session().query(sql.exists().where(and_(*conditions))) frozen = query.scalar() return frozen ################### def _cluster_query(context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, name=None, session=None, **filters): filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Cluster, filters): return None query = model_query(context, models.Cluster, session=session, read_deleted=read_deleted) # Cluster is a special case of filter, because we must match exact match # as well as hosts that specify the backend if name: query = query.filter(_filter_host(models.Cluster.name, name, name_match_level)) if filters: query = query.filter_by(**filters) if services_summary: query = query.options(undefer_group('services_summary')) # We bind the expiration time to now (as it changes with each query) # and is required by num_down_hosts query = query.params(expired=utils.service_expired_time()) elif 'num_down_hosts' in filters: query = query.params(expired=utils.service_expired_time()) if get_services: query = query.options(joinedload_all('services')) if is_up is not None: date_limit = utils.service_expired_time() filter_ = and_(models.Cluster.last_heartbeat.isnot(None), models.Cluster.last_heartbeat >= date_limit) query = query.filter(filter_ == is_up) return query @require_admin_context def cluster_get(context, id=None, is_up=None, get_services=False, services_summary=False, read_deleted='no', 
name_match_level=None, **filters): query = _cluster_query(context, is_up, get_services, services_summary, read_deleted, name_match_level, id=id, **filters) cluster = None if not query else query.first() if not cluster: cluster_id = id or six.text_type(filters) raise exception.ClusterNotFound(id=cluster_id) return cluster @require_admin_context def cluster_get_all(context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, **filters): query = _cluster_query(context, is_up, get_services, services_summary, read_deleted, name_match_level, **filters) return [] if not query else query.all() @require_admin_context def cluster_create(context, values): cluster_ref = models.Cluster() cluster_ref.update(values) # Provided disabled value takes precedence if values.get('disabled') is None: cluster_ref.disabled = not CONF.enable_new_services session = get_session() try: with session.begin(): cluster_ref.save(session) # We mark that newly created cluster has no hosts to prevent # problems at the OVO level cluster_ref.last_heartbeat = None return cluster_ref # If we had a race condition (another non deleted cluster exists with the # same name) raise Duplicate exception. 
except db_exc.DBDuplicateEntry: raise exception.ClusterExists(name=values.get('name')) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def cluster_update(context, id, values): query = _cluster_query(context, id=id) result = query.update(values) if not result: raise exception.ClusterNotFound(id=id) @require_admin_context def cluster_destroy(context, id): query = _cluster_query(context, id=id) query = query.filter(models.Cluster.num_hosts == 0) # If the update doesn't succeed we don't know if it's because the result = query.update(models.Cluster.delete_values(), synchronize_session=False) if not result: # This will fail if the cluster doesn't exist raising the right cluster_get(context, id=id) raise exception.ClusterHasHosts(id=id) ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _dict_with_extra_specs_if_authorized(context, inst_type_query): inst_type_dict = dict(inst_type_query) extra_specs = {x['key']: x['value'] for x in inst_type_query['extra_specs']} inst_type_dict['extra_specs'] = extra_specs return inst_type_dict ################### def _dict_with_group_specs_if_authorized(context, inst_type_query): inst_type_dict = dict(inst_type_query) if not is_admin_context(context): del(inst_type_dict['group_specs']) else: group_specs = {x['key']: x['value'] for x in inst_type_query['group_specs']} inst_type_dict['group_specs'] = group_specs return inst_type_dict ################### @require_context def _quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result 
@require_context
def quota_get(context, project_id, resource):
    """Return the quota row for one (project, resource) pair.

    Delegates to _quota_get (defined earlier in this module), which
    presumably raises ProjectQuotaNotFound when no row exists — see the
    except clause in quota_reserve below.
    """
    return _quota_get(context, project_id, resource)


@require_context
def quota_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <resource>: hard_limit, ...} for a project.

    Soft-deleted rows are excluded (read_deleted="no").
    """
    rows = model_query(context, models.Quota, read_deleted="no").\
        filter_by(project_id=project_id).\
        all()
    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = row.hard_limit
    return result


@require_context
def quota_allocated_get_all_by_project(context, project_id, session=None):
    """Return {'project_id': ..., <resource>: allocated, ...} for a project.

    Same shape as quota_get_all_by_project but reporting the `allocated`
    column instead of `hard_limit`.  Accepts an optional session so it can
    participate in a caller's transaction (e.g. quota_reserve).
    """
    rows = model_query(context, models.Quota, read_deleted='no',
                       session=session).filter_by(project_id=project_id).all()
    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = row.allocated
    return result


@require_context
def _quota_get_all_by_resource(context, resource, session=None):
    """Return all non-deleted Quota rows for a resource, across projects."""
    rows = model_query(context, models.Quota,
                       session=session,
                       read_deleted='no').filter_by(
        resource=resource).all()
    return rows


@require_context
def quota_create(context, project_id, resource, limit, allocated):
    """Create a Quota row; `allocated` is only stored when truthy (non-zero)."""
    quota_ref = models.Quota()
    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    if allocated:
        quota_ref.allocated = allocated
    session = get_session()
    with session.begin():
        quota_ref.save(session)
    return quota_ref


@require_context
def quota_update(context, project_id, resource, limit):
    """Set the hard limit for an existing per-project quota row."""
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource,
                               session=session)
        quota_ref.hard_limit = limit
    return quota_ref


@require_context
def quota_update_resource(context, old_res, new_res):
    """Rename a resource on every project's quota row (e.g. on a
    volume-type rename).  Flushed when the transaction commits.
    """
    session = get_session()
    with session.begin():
        quotas = _quota_get_all_by_resource(context, old_res,
                                            session=session)
        for quota in quotas:
            quota.resource = new_res


@require_admin_context
def quota_allocated_update(context, project_id, resource, allocated):
    """Set the `allocated` amount on an existing per-project quota row."""
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource,
                               session=session)
        quota_ref.allocated = allocated
    return quota_ref


@require_admin_context
def quota_destroy(context, project_id, resource):
    """Soft-delete the quota row for one (project, resource) pair."""
    session = get_session()
    with session.begin():
        quota_ref = _quota_get(context, project_id, resource,
                               session=session)
        return quota_ref.delete(session=session)


###################


@require_context
def _quota_class_get(context, class_name, resource, session=None):
    """Return the QuotaClass row for (class_name, resource).

    :raises exception.QuotaClassNotFound: if no non-deleted row matches.
    """
    result = model_query(context, models.QuotaClass, session=session,
                         read_deleted="no").\
        filter_by(class_name=class_name).\
        filter_by(resource=resource).\
        first()

    if not result:
        raise exception.QuotaClassNotFound(class_name=class_name)

    return result


@require_context
def quota_class_get(context, class_name, resource):
    """Public wrapper around _quota_class_get (no session parameter)."""
    return _quota_class_get(context, class_name, resource)


def quota_class_get_defaults(context):
    """Return the default quota class as {'class_name': ..., <res>: limit}.

    NOTE: unlike its siblings this function carries no @require_context
    decorator in the original source.
    """
    rows = model_query(context, models.QuotaClass,
                       read_deleted="no").\
        filter_by(class_name=_DEFAULT_QUOTA_NAME).all()

    result = {'class_name': _DEFAULT_QUOTA_NAME}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result


@require_context
def quota_class_get_all_by_name(context, class_name):
    """Return {'class_name': ..., <resource>: hard_limit, ...} for a class."""
    rows = model_query(context, models.QuotaClass, read_deleted="no").\
        filter_by(class_name=class_name).\
        all()

    result = {'class_name': class_name}
    for row in rows:
        result[row.resource] = row.hard_limit

    return result


@require_context
def _quota_class_get_all_by_resource(context, resource, session):
    """Return all non-deleted QuotaClass rows for one resource.

    `session` is required (positional) here, unlike most helpers.
    """
    result = model_query(context, models.QuotaClass,
                         session=session,
                         read_deleted="no").\
        filter_by(resource=resource).\
        all()

    return result


@handle_db_data_error
@require_context
def quota_class_create(context, class_name, resource, limit):
    """Create a QuotaClass row with the given hard limit."""
    quota_class_ref = models.QuotaClass()
    quota_class_ref.class_name = class_name
    quota_class_ref.resource = resource
    quota_class_ref.hard_limit = limit
    session = get_session()
    with session.begin():
        quota_class_ref.save(session)
    return quota_class_ref


@require_context
def quota_class_update(context, class_name, resource, limit):
    """Set the hard limit on an existing quota-class row."""
    session = get_session()
    with session.begin():
        quota_class_ref = _quota_class_get(context, class_name, resource,
                                           session=session)
        quota_class_ref.hard_limit = limit
    return quota_class_ref


@require_context
def quota_class_update_resource(context, old_res, new_res):
    """Rename a resource on every quota-class row that references it."""
    session = get_session()
    with session.begin():
        quota_class_list = _quota_class_get_all_by_resource(
            context, old_res, session)
        for quota_class in quota_class_list:
            quota_class.resource = new_res


@require_context
def quota_class_destroy(context, class_name, resource):
    """Soft-delete one (class_name, resource) quota-class row."""
    session = get_session()
    with session.begin():
        quota_class_ref = _quota_class_get(context, class_name, resource,
                                           session=session)
        return quota_class_ref.delete(session=session)


@require_context
def quota_class_destroy_all_by_name(context, class_name):
    """Soft-delete every quota-class row belonging to `class_name`."""
    session = get_session()
    with session.begin():
        quota_classes = model_query(context, models.QuotaClass,
                                    session=session, read_deleted="no").\
            filter_by(class_name=class_name).\
            all()

        for quota_class_ref in quota_classes:
            quota_class_ref.delete(session=session)


###################


@require_context
def quota_usage_get(context, project_id, resource):
    """Return the QuotaUsage row for (project, resource).

    :raises exception.QuotaUsageNotFound: if no non-deleted row matches.
    """
    result = model_query(context, models.QuotaUsage, read_deleted="no").\
        filter_by(project_id=project_id).\
        filter_by(resource=resource).\
        first()

    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)

    return result


@require_context
def quota_usage_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <res>: {'in_use': n, 'reserved': m}, ...}."""
    rows = model_query(context, models.QuotaUsage, read_deleted="no").\
        filter_by(project_id=project_id).\
        all()

    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)

    return result


@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
                        until_refresh, session=None):
    """Create a QuotaUsage row.

    Saved directly on the given session; the caller (e.g. quota_reserve)
    owns the enclosing transaction.
    """
    quota_usage_ref = models.QuotaUsage()
    quota_usage_ref.project_id = project_id
    quota_usage_ref.resource = resource
    quota_usage_ref.in_use = in_use
    quota_usage_ref.reserved = reserved
    quota_usage_ref.until_refresh = until_refresh
    quota_usage_ref.save(session=session)

    return quota_usage_ref
###################


def _reservation_create(context, uuid, usage, project_id, resource, delta,
                        expire, session=None, allocated_id=None):
    """Create a Reservation row.

    `usage` is either a QuotaUsage row (normal reservations) or None
    (allocated reservations, which carry `allocated_id` instead).
    """
    usage_id = usage['id'] if usage else None
    reservation_ref = models.Reservation()
    reservation_ref.uuid = uuid
    reservation_ref.usage_id = usage_id
    reservation_ref.project_id = project_id
    reservation_ref.resource = resource
    reservation_ref.delta = delta
    reservation_ref.expire = expire
    reservation_ref.allocated_id = allocated_id
    reservation_ref.save(session=session)
    return reservation_ref


###################


# NOTE(johannes): The quota code uses SQL locking (SELECT ... FOR UPDATE)
# to ensure races don't cause under- or over-counting of resources.
def _get_quota_usages(context, session, project_id, resources=None):
    """Return {resource: QuotaUsage} for a project, rows locked FOR UPDATE.

    When `resources` is given, only those resources are fetched/locked.
    Rows are ordered by id to keep lock acquisition order deterministic.
    """
    query = model_query(context, models.QuotaUsage,
                        read_deleted="no",
                        session=session).filter_by(project_id=project_id)
    if resources:
        query = query.filter(models.QuotaUsage.resource.in_(list(resources)))
    rows = query.order_by(models.QuotaUsage.id.asc()).\
        with_for_update().all()
    return {row.resource: row for row in rows}


def _get_quota_usages_by_resource(context, session, resource):
    """Return all QuotaUsage rows for one resource, locked FOR UPDATE."""
    # NOTE(review): the original passed deleted="no", which is not the
    # keyword model_query honors; use read_deleted="no" for consistency
    # with _get_quota_usages above.
    rows = model_query(context, models.QuotaUsage,
                       read_deleted="no",
                       session=session).\
        filter_by(resource=resource).\
        order_by(models.QuotaUsage.id.asc()).\
        with_for_update().\
        all()
    return rows


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_usage_update_resource(context, old_res, new_res):
    """Rename a resource on every usage row and force a refresh.

    Setting until_refresh=1 makes the next quota_reserve re-sync the
    renamed usage from the real resource counts.
    """
    session = get_session()
    with session.begin():
        usages = _get_quota_usages_by_resource(context, session, old_res)
        for usage in usages:
            usage.resource = new_res
            usage.until_refresh = 1


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None,
                  is_allocated_reserve=False):
    """Check quota and create reservations for the requested deltas.

    :param resources: dict of quota resource descriptors (provide .sync
        and optionally .volume_type_id/.volume_type_name)
    :param quotas: {resource: hard_limit}; a negative limit means unlimited
    :param deltas: {resource: requested change}
    :param expire: datetime after which the reservations lapse
    :param until_refresh: refresh usage after this many reservations
    :param max_age: refresh usage when older than this many seconds
    :param is_allocated_reserve: track against Quota.allocated instead of
        QuotaUsage.reserved (used for nested/allocated quotas)
    :returns: list of reservation UUIDs
    :raises exception.OverQuota: when any positive delta would exceed its
        limit; raised after the transaction commits, with current usages
        attached.
    """
    elevated = context.elevated()
    session = get_session()
    with session.begin():
        if project_id is None:
            project_id = context.project_id
        # Lock the relevant usage rows for the whole check-and-reserve.
        usages = _get_quota_usages(context, session, project_id,
                                   resources=deltas.keys())
        allocated = quota_allocated_get_all_by_project(context, project_id,
                                                       session=session)
        allocated.pop('project_id')

        # Handle usage refresh.  `work` shrinks as sync calls refresh
        # resources (a single sync may refresh several at once).
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Decide whether this resource's usage needs a re-sync.
            refresh = False
            if resource not in usages:
                usages[resource] = _quota_usage_create(elevated,
                                                       project_id,
                                                       resource,
                                                       0, 0,
                                                       until_refresh or None,
                                                       session=session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync; refresh.
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and usages[resource].updated_at is not None and (
                    (timeutils.utcnow() -
                        usages[resource].updated_at).total_seconds() >=
                    max_age):
                refresh = True

            # Re-sync in_use from the authoritative per-resource counter.
            if refresh:
                sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
                volume_type_id = getattr(resources[resource],
                                         'volume_type_id', None)
                volume_type_name = getattr(resources[resource],
                                           'volume_type_name', None)
                updates = sync(elevated, project_id,
                               volume_type_id=volume_type_id,
                               volume_type_name=volume_type_name,
                               session=session)
                for res, in_use in updates.items():
                    # Make sure we have a usage row to update.
                    if res not in usages:
                        usages[res] = _quota_usage_create(
                            elevated,
                            project_id,
                            res,
                            0, 0,
                            until_refresh or None,
                            session=session
                        )

                    usages[res].in_use = in_use
                    usages[res].until_refresh = until_refresh or None

                    # Because one sync call may refresh several resources
                    # and we don't want to double-sync, we make sure all
                    # refreshed resources are dropped from the work set.
                    work.discard(res)

                    # NOTE(Vek): We make the assumption that the sync
                    #            routine actually refreshes the resources
                    #            that it is the sync routine for.  We
                    #            don't check, because this is a
                    #            best-effort mechanism.

        # Check for deltas that would make usage negative.
        if is_allocated_reserve:
            unders = [r for r, delta in deltas.items()
                      if delta < 0 and delta + allocated.get(r, 0) < 0]
        else:
            unders = [r for r, delta in deltas.items()
                      if delta < 0 and delta + usages[r].in_use < 0]

        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any problems.
        overs = [r for r, delta in deltas.items()
                 if quotas[r] >= 0 and delta >= 0 and
                 quotas[r] < delta + usages[r].total + allocated.get(r, 0)]

        # Create the reservations only when nothing is over quota;
        # the OverQuota raise happens after the transaction commits.
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                usage = usages[resource]
                allocated_id = None
                if is_allocated_reserve:
                    try:
                        quota = _quota_get(context, project_id, resource,
                                           session=session)
                    except exception.ProjectQuotaNotFound:
                        # If we were using the default quota, create a
                        # DB entry so we can track the allocation.
                        quota = quota_create(context, project_id, resource,
                                             quotas[resource], 0)
                    # Since there's no reserved/total for allocated
                    # quotas, update the allocated column directly.
                    quota_allocated_update(context, project_id, resource,
                                           quota.allocated + delta)
                    allocated_id = quota.id
                    usage = None
                reservation = _reservation_create(
                    elevated, str(uuid.uuid4()), usage, project_id, resource,
                    delta, expire, session=session, allocated_id=allocated_id)

                reservations.append(reservation.uuid)

                # Also update the reserved quantity.
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here's the scenario
                #            we're worried about:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0 and not is_allocated_reserve:
                    usages[resource].reserved += delta

    if unders:
        LOG.warning("Change will make usage less than 0 for the following "
                    "resources: %s", unders)
    if overs:
        usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
                          allocated=allocated.get(k, 0))
                  for k, v in usages.items()}
        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                  usages=usages)
    return reservations


def _quota_reservations(session, context, reservations):
    """Return the listed Reservation rows, locked FOR UPDATE."""
    return model_query(context, models.Reservation,
                       read_deleted="no",
                       session=session).\
        filter(models.Reservation.uuid.in_(reservations)).\
        with_for_update().\
        all()


def _get_reservation_resources(session, context, reservation_ids):
    """Return the set of resource names referenced by the reservations."""
    reservations = model_query(context, models.Reservation,
                               read_deleted="no",
                               session=session).\
        options(load_only('resource')).\
        filter(models.Reservation.uuid.in_(reservation_ids)).\
        all()
    return {r.resource for r in reservations}


def _dict_with_usage_id(usages):
    """Re-key a {resource: QuotaUsage} mapping by QuotaUsage.id."""
    return {row.id: row for row in usages.values()}


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_commit(context, reservations, project_id=None):
    """Apply reservations: move reserved amounts into in_use, then delete."""
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(
            context, session, project_id,
            resources=_get_reservation_resources(session, context,
                                                 reservations))
        usages = _dict_with_usage_id(usages)

        for reservation in _quota_reservations(session, context,
                                               reservations):
            # Allocated reservations will have already been bumped.
            if not reservation.allocated_id:
                usage = usages[reservation.usage_id]
                if reservation.delta >= 0:
                    usage.reserved -= reservation.delta
                usage.in_use += reservation.delta

            reservation.delete(session=session)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_rollback(context, reservations, project_id=None):
    """Undo reservations: release reserved/allocated amounts, then delete."""
    session = get_session()
    with session.begin():
        usages = _get_quota_usages(
            context, session, project_id,
            resources=_get_reservation_resources(session, context,
                                                 reservations))
        usages = _dict_with_usage_id(usages)
        for reservation in _quota_reservations(session, context,
                                               reservations):
            if reservation.allocated_id:
                reservation.quota.allocated -= reservation.delta
            else:
                usage = usages[reservation.usage_id]
                if reservation.delta >= 0:
                    usage.reserved -= reservation.delta

            reservation.delete(session=session)


def quota_destroy_by_project(*args, **kwargs):
    """Destroy all limit quotas of a project; usages/reservations kept."""
    quota_destroy_all_by_project(only_quotas=True, *args, **kwargs)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_destroy_all_by_project(context, project_id, only_quotas=False):
    """Soft-delete a project's quotas and, unless only_quotas, its
    usages and reservations too.
    """
    session = get_session()
    with session.begin():
        quotas = model_query(context, models.Quota, session=session,
                             read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_ref in quotas:
            quota_ref.delete(session=session)

        if only_quotas:
            return

        quota_usages = model_query(context, models.QuotaUsage,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_usage_ref in quota_usages:
            quota_usage_ref.delete(session=session)

        reservations = model_query(context, models.Reservation,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for reservation_ref in reservations:
            reservation_ref.delete(session=session)


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_expire(context):
    """Release and delete every reservation past its expire time."""
    session = get_session()
    with session.begin():
        current_time = timeutils.utcnow()
        results = model_query(context, models.Reservation,
                              session=session,
                              read_deleted="no").\
            filter(models.Reservation.expire < current_time).\
            all()

        if results:
            for reservation in results:
                # Only positive deltas were ever added to reserved /
                # allocated (see quota_reserve), so only they are undone.
                if reservation.delta >= 0:
                    if reservation.allocated_id:
                        reservation.quota.allocated -= reservation.delta
                        reservation.quota.save(session=session)
                    else:
                        reservation.usage.reserved -= reservation.delta
                        reservation.usage.save(session=session)

                reservation.delete(session=session)


###################


@require_admin_context
def volume_attach(context, values):
    """Create a VolumeAttachment row, generating an id when absent."""
    volume_attachment_ref = models.VolumeAttachment()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    volume_attachment_ref.update(values)
    session = get_session()
    with session.begin():
        volume_attachment_ref.save(session=session)
        return _attachment_get(context, values['id'],
                               session=session)


@require_admin_context
def volume_attached(context, attachment_id, instance_uuid, host_name,
                    mountpoint, attach_mode, mark_attached):
    """Mark an attachment (and its volume) attached or attaching.

    :param mark_attached: when False, record the 'attaching' intermediate
        state instead of 'attached'/'in-use'.
    :returns: (volume_ref, attachment update dict without 'updated_at')
    :raises exception.InvalidUUID: for a malformed instance_uuid.
    """
    attach_status = fields.VolumeAttachStatus.ATTACHED
    volume_status = 'in-use'
    if not mark_attached:
        attach_status = fields.VolumeAttachStatus.ATTACHING
        volume_status = 'attaching'

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        volume_attachment_ref = _attachment_get(context, attachment_id,
                                                session=session)

        updated_values = {'mountpoint': mountpoint,
                          'attach_status': attach_status,
                          'instance_uuid': instance_uuid,
                          'attached_host': host_name,
                          'attach_time': timeutils.utcnow(),
                          'attach_mode': attach_mode,
                          # Keep updated_at untouched at the SQL level.
                          'updated_at': literal_column('updated_at')}
        volume_attachment_ref.update(updated_values)
        volume_attachment_ref.save(session=session)
        del updated_values['updated_at']

        volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
                                 session=session)
        volume_ref['status'] = volume_status
        volume_ref['attach_status'] = attach_status
        volume_ref.save(session=session)
        return (volume_ref, updated_values)


@handle_db_data_error
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_create(context, values):
    """Create a Volume row from `values`, normalizing metadata.

    Admin metadata is only persisted for admin contexts; non-admin
    callers have any supplied volume_admin_metadata stripped.
    """
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    if is_admin_context(context):
        values['volume_admin_metadata'] = \
            _metadata_refs(values.get('admin_metadata'),
                           models.VolumeAdminMetadata)
    elif values.get('volume_admin_metadata'):
        del values['volume_admin_metadata']

    volume_ref = models.Volume()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(volume_ref)

    return _volume_get(context, values['id'], session=session)


def get_booleans_for_table(table_name):
    """Return the boolean column names of the model named table_name
    (capitalized lookup on the models module).
    """
    booleans = set()
    table = getattr(models, table_name.capitalize())
    if hasattr(table, '__table__'):
        columns = table.__table__.columns
        for column in columns:
            if isinstance(column.type, sqltypes.Boolean):
                booleans.add(column.name)

    return booleans


@require_admin_context
def volume_data_get_for_host(context, host, count_only=False):
    """Return volume count (or (count, total size)) for a backend host.

    Matches both the exact host and pool-qualified variants
    ('host#pool') — the '#%' LIKE suffix was truncated in this file and
    is restored here.
    """
    host_attr = models.Volume.host
    conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')]
    if count_only:
        result = model_query(context,
                             func.count(models.Volume.id),
                             read_deleted="no").filter(
            or_(*conditions)).first()
        return result[0] or 0
    else:
        result = model_query(context,
                             func.count(models.Volume.id),
                             func.sum(models.Volume.size),
                             read_deleted="no").filter(
            or_(*conditions)).first()
        # NOTE(vish): convert None to 0
        return (result[0] or 0, result[1] or 0)


@require_admin_context
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
                                 session=None, host=None):
    """Return (volume count, total size) for a project, optionally
    narrowed by volume type and/or host.
    """
    query = model_query(context,
                        func.count(models.Volume.id),
                        func.sum(models.Volume.size),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)
    if host:
        query = query.filter(_filter_host(models.Volume.host, host))
    if volume_type_id:
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)


@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None,
                                 session=None):
    """Return (backup count, total size) for a project, optionally
    narrowed by volume type.
    """
    query = model_query(context,
                        func.count(models.Backup.id),
                        func.sum(models.Backup.size),
                        read_deleted="no",
                        session=session).\
        filter_by(project_id=project_id)
    if volume_type_id:
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)


@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None,
                                host=None):
    """Public wrapper for _volume_data_get_for_project (no session arg)."""
    return _volume_data_get_for_project(context, project_id, volume_type_id,
                                        host=host)


# Models whose rows must be soft-deleted together with their volume.
VOLUME_DEPENDENT_MODELS = frozenset([models.VolumeMetadata,
                                     models.VolumeAdminMetadata,
                                     models.Transfer,
                                     models.VolumeGlanceMetadata,
                                     models.VolumeAttachment])


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_destroy(context, volume_id):
    """Soft-delete a volume and all its dependent rows.

    :returns: the applied update dict (without the 'updated_at' marker).
    """
    session = get_session()
    now = timeutils.utcnow()
    updated_values = {'status': 'deleted',
                      'deleted': True,
                      'deleted_at': now,
                      'updated_at': literal_column('updated_at'),
                      'migration_status': None}
    with session.begin():
        model_query(context, models.Volume, session=session).\
            filter_by(id=volume_id).\
            update(updated_values)
        for model in VOLUME_DEPENDENT_MODELS:
            model_query(context, model, session=session).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': now,
                        'updated_at': literal_column('updated_at')})
    del updated_values['updated_at']
    return updated_values


def _include_in_cluster(context, cluster, model, partial_rename, filters):
    """Set cluster_name on all rows of `model` matching `filters`.

    When partial_rename is True and `cluster` is a string, rows whose
    cluster_name/host merely start with the filter value get only that
    prefix replaced (SQL REPLACE), preserving any pool suffix.

    :returns: number of rows updated, or None for invalid filters.
    """
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(model, filters):
        return None

    query = get_session().query(model)
    if hasattr(model, 'deleted'):
        query = query.filter_by(deleted=False)

    # cluster_name and host are special filter cases: they match
    # backend prefixes rather than requiring exact equality.
    for field in {'cluster_name', 'host'}.intersection(filters):
        value = filters.pop(field)
        # We do a special backend filter
        query = query.filter(_filter_host(getattr(model, field), value))
        # If we want to do a partial rename and we haven't set the cluster
        # to a literal value, build the REPLACE expression.
        if partial_rename and isinstance(cluster, six.string_types):
            cluster = func.replace(getattr(model, field), value, cluster)

    query = query.filter_by(**filters)
    result = query.update({'cluster_name': cluster},
                          synchronize_session=False)
    return result
@require_admin_context def volume_include_in_cluster(context, cluster, partial_rename=True, **filters): return _include_in_cluster(context, cluster, models.Volume, partial_rename, filters) @require_admin_context def volume_detached(context, volume_id, attachment_id): # new proposal for multi-attach remain_attachment = True session = get_session() with session.begin(): try: attachment = _attachment_get(context, attachment_id, session=session) except exception.VolumeAttachmentNotFound: attachment_updates = None attachment = None if attachment: now = timeutils.utcnow() attachment_updates = { 'attach_status': fields.VolumeAttachStatus.DETACHED, 'detach_time': now, 'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at'), } attachment.update(attachment_updates) attachment.save(session=session) del attachment_updates['updated_at'] attachment_list = None volume_ref = _volume_get(context, volume_id, session=session) volume_updates = {'updated_at': literal_column('updated_at')} if not volume_ref.volume_attachment: # NOTE(jdg): We kept the old arg style allowing session exclusively # for this one call attachment_list = volume_attachment_get_all_by_volume_id( context, volume_id, session=session) remain_attachment = False if attachment_list and len(attachment_list) > 0: remain_attachment = True if not remain_attachment: # Hide status update from user if we're performing volume migration if ((not volume_ref.migration_status and not (volume_ref.status == 'uploading')) or volume_ref.migration_status in ('success', 'error')): volume_updates['status'] = 'available' volume_updates['attach_status'] = ( fields.VolumeAttachStatus.DETACHED) else: volume_updates['status'] = 'in-use' volume_updates['attach_status'] = ( fields.VolumeAttachStatus.ATTACHED) volume_ref.update(volume_updates) volume_ref.save(session=session) del volume_updates['updated_at'] return (volume_updates, attachment_updates) def _process_model_like_filter(model, query, filters): if query is None: 
return query for key in sorted(filters): column_attr = getattr(model, key) if 'property' == type(column_attr).__name__: continue value = filters[key] if not (isinstance(value, (six.string_types, int))): continue query = query.filter( column_attr.op('LIKE')(u'%%%s%%' % value)) return query def apply_like_filters(model): def decorator_filters(process_exact_filters): def _decorator(query, filters): exact_filters = filters.copy() regex_filters = {} for key, value in filters.items(): if key.endswith('~'): exact_filters.pop(key) regex_filters[key.rstrip('~')] = value query = process_exact_filters(query, exact_filters) return _process_model_like_filter(model, query, regex_filters) return _decorator return decorator_filters @require_context def _volume_get_query(context, session=None, project_only=False, joined_load=True): if not joined_load: return model_query(context, models.Volume, session=session, project_only=project_only) if is_admin_context(context): return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_admin_metadata')).\ options(joinedload('volume_type')).\ options(joinedload('volume_attachment')).\ options(joinedload('consistencygroup')).\ options(joinedload('group')) else: return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ options(joinedload('volume_attachment')).\ options(joinedload('consistencygroup')).\ options(joinedload('group')) @require_context def _volume_get(context, volume_id, session=None, joined_load=True): result = _volume_get_query(context, session=session, project_only=True, joined_load=joined_load) if joined_load: result = result.options(joinedload('volume_type.extra_specs')) result = result.filter_by(id=volume_id).first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result def 
_attachment_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.VolumeAttachment, filters, exclude_list=['project_id']): return [] session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.VolumeAttachment) if query is None: return [] return query.all() def _attachment_get(context, attachment_id, session=None, read_deleted=False, project_only=True): result = (model_query(context, models.VolumeAttachment, session=session, read_deleted=read_deleted) .filter_by(id=attachment_id) .options(joinedload('volume')) .first()) if not result: raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' % attachment_id) return result def _attachment_get_query(context, session=None, project_only=False): return model_query(context, models.VolumeAttachment, session=session, project_only=project_only).options(joinedload('volume')) @apply_like_filters(model=models.VolumeAttachment) def _process_attachment_filters(query, filters): if filters: project_id = filters.pop('project_id', None) if not is_valid_model_filters(models.VolumeAttachment, filters): return if project_id: volume = models.Volume query = query.filter(volume.id == models.VolumeAttachment.volume_id, volume.project_id == project_id) query = query.filter_by(**filters) return query @require_admin_context def volume_attachment_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return _attachment_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def volume_attachment_get_all_by_volume_id(context, volume_id, session=None): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(volume_id=volume_id).\ filter(models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED). 
\ options(joinedload('volume')).\ all() return result @require_context def volume_attachment_get_all_by_host(context, host): session = get_session() with session.begin(): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(attached_host=host).\ filter(models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED). \ options(joinedload('volume')).\ all() return result @require_context def volume_attachment_get(context, attachment_id): return _attachment_get(context, attachment_id) @require_context def volume_attachment_get_all_by_instance_uuid(context, instance_uuid): session = get_session() with session.begin(): result = model_query(context, models.VolumeAttachment, session=session).\ filter_by(instance_uuid=instance_uuid).\ filter(models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED).\ options(joinedload('volume')).\ all() return result @require_context def volume_attachment_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _attachment_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def attachment_destroy(context, attachment_id): utcnow = timeutils.utcnow() session = get_session() with session.begin(): updated_values = {'attach_status': fields.VolumeAttachStatus.DELETED, 'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')} model_query(context, models.VolumeAttachment, session=session).\ filter_by(id=attachment_id).\ update(updated_values) model_query(context, models.AttachmentSpecs, session=session).\ filter_by(attachment_id=attachment_id).\ update({'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) del 
updated_values['updated_at'] return updated_values def attachment_specs_exist(context): query = model_query(context, models.AttachmentSpecs, read_deleted='no') return bool(query.first()) def _attachment_specs_query(context, attachment_id, session=None): return model_query(context, models.AttachmentSpecs, session=session, read_deleted="no").\ filter_by(attachment_id=attachment_id) @require_context def attachment_specs_get(context, attachment_id): rows = _attachment_specs_query(context, attachment_id).\ all() result = {row['key']: row['value'] for row in rows} return result @require_context def attachment_specs_delete(context, attachment_id, key): session = get_session() with session.begin(): _attachment_specs_get_item(context, attachment_id, key, session) _attachment_specs_query(context, attachment_id, session).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _attachment_specs_get_item(context, attachment_id, key, session=None): result = _attachment_specs_query( context, attachment_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.AttachmentSpecsNotFound( specs_key=key, attachment_id=attachment_id) return result @handle_db_data_error @require_context def attachment_specs_update_or_create(context, attachment_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value in specs.items(): try: spec_ref = _attachment_specs_get_item( context, attachment_id, key, session) except exception.AttachmentSpecsNotFound: spec_ref = models.AttachmentSpecs() spec_ref.update({"key": key, "value": value, "attachment_id": attachment_id, "deleted": False}) spec_ref.save(session=session) return specs @require_context def volume_get(context, volume_id): return _volume_get(context, volume_id) @require_admin_context def volume_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): 
session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset) # No volumes would match, return empty list if query is None: return [] return query.all() @require_context def get_volume_summary(context, project_only, filters=None): if not (project_only or is_admin_context(context)): raise exception.AdminRequired() query = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no") if project_only: query = query.filter_by(project_id=context.project_id) if filters: query = _process_volume_filters(query, filters) if query is None: return [] result = query.first() query_metadata = model_query( context, models.VolumeMetadata.key, models.VolumeMetadata.value, read_deleted="no") if project_only: query_metadata = query_metadata.join( models.Volume, models.Volume.id == models.VolumeMetadata.volume_id).filter_by( project_id=context.project_id) result_metadata = query_metadata.distinct().all() result_metadata_list = collections.defaultdict(list) for key, value in result_metadata: result_metadata_list[key].append(value) return (result[0] or 0, result[1] or 0, result_metadata_list) @require_admin_context def volume_get_all_by_host(context, host, filters=None): # As a side effect of the introduction of pool-aware scheduler, # newly created volumes will have pool information appended to # 'host' field of a volume record. 
So a volume record in DB can # now be either form below: # Host # Host#Pool if host and isinstance(host, six.string_types): session = get_session() with session.begin(): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + ' query = _volume_get_query(context).filter(or_(*conditions)) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() elif not host: return [] @require_context def volume_get_all_by_group(context, group_id, filters=None): query = _volume_get_query(context).filter_by(consistencygroup_id=group_id) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() @require_context def volume_get_all_by_generic_group(context, group_id, filters=None): query = _volume_get_query(context).filter_by(group_id=group_id) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() @require_context def volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): session = get_session() with session.begin(): authorize_project_context(context, project_id) # Add in the project filter without modifying the given filters filters = filters.copy() if filters else {} filters['project_id'] = project_id # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset) # No volumes would match, return empty list if query is None: return [] return query.all() def _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset=None, paginate_type=models.Volume): get_query, process_filters, get = PAGINATION_HELPERS[paginate_type] sort_keys, sort_dirs = process_sort_params(sort_keys, sort_dirs, 
default_dir='desc') query = get_query(context, session=session) if filters: query = process_filters(query, filters) if query is None: return None marker_object = None if marker is not None: marker_object = get(context, marker, session) return sqlalchemyutils.paginate_query(query, paginate_type, limit, sort_keys, marker=marker_object, sort_dirs=sort_dirs, offset=offset) def calculate_resource_count(context, resource_type, filters): session = get_session() if resource_type not in CALCULATE_COUNT_HELPERS.keys(): raise exception.InvalidInput( reason=_("Model %s doesn't support " "counting resource.") % resource_type) get_query, process_filters = CALCULATE_COUNT_HELPERS[resource_type] query = get_query(context, session=session) if filters: query = process_filters(query, filters) if query is None: return 0 return query.with_entities(func.count()).scalar() @apply_like_filters(model=models.Volume) def _process_volume_filters(query, filters): filters = filters.copy() if filters.get('no_migration_targets', False): filters.pop('no_migration_targets') try: column_attr = getattr(models.Volume, 'migration_status') conditions = [column_attr == None, column_attr.op('NOT LIKE')('target:%')] query = query.filter(or_(*conditions)) except AttributeError: LOG.debug("'migration_status' column could not be found.") return None host = filters.pop('host', None) if host: query = query.filter(_filter_host(models.Volume.host, host)) cluster_name = filters.pop('cluster_name', None) if cluster_name: query = query.filter(_filter_host(models.Volume.cluster_name, cluster_name)) for time_comparison_filter in ['created_at', 'updated_at']: if filters.get(time_comparison_filter, None): time_filter_dict = filters.pop(time_comparison_filter) try: time_filter_attr = getattr(models.Volume, time_comparison_filter) query = query.filter(_filter_time_comparison(time_filter_attr, time_filter_dict)) except AttributeError: LOG.debug("%s column could not be found.", time_comparison_filter) return None for key in 
filters.keys(): if key in ('metadata', 'glance_metadata'): if not isinstance(filters[key], dict): LOG.debug("'%s' filter value is not valid.", key) return None continue try: column_attr = getattr(models.Volume, key) prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug(("'%s' filter key is not valid, " "it maps to a relationship."), key) return None except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None filter_dict = {} for key, value in filters.items(): if key == 'metadata': col_attr = getattr(models.Volume, 'volume_metadata') col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') for k, v in value.items(): query = query.filter(or_(col_attr.any(key=k, value=v), col_ad_attr.any(key=k, value=v))) elif key == 'glance_metadata': col_gl_attr = models.Volume.volume_glance_metadata for k, v in value.items(): query = query.filter(col_gl_attr.any(key=k, value=v)) elif isinstance(value, (list, tuple, set, frozenset)): column_attr = getattr(models.Volume, key) query = query.filter(column_attr.in_(value)) else: filter_dict[key] = value if filter_dict: query = query.filter_by(**filter_dict) return query def process_sort_params(sort_keys, sort_dirs, default_keys=None, default_dir='asc'): if default_keys is None: default_keys = ['created_at', 'id'] if sort_dirs and len(sort_dirs): default_dir_value = sort_dirs[0] else: default_dir_value = default_dir if sort_keys: result_keys = list(sort_keys) else: result_keys = [] if sort_dirs: result_dirs = [] for sort_dir in sort_dirs: if sort_dir not in ('asc', 'desc'): msg = _("Unknown sort direction, must be 'desc' or 'asc'.") raise exception.InvalidInput(reason=msg) result_dirs.append(sort_dir) else: result_dirs = [default_dir_value for _sort_key in result_keys] while len(result_dirs) < len(result_keys): result_dirs.append(default_dir_value) if len(result_dirs) > len(result_keys): msg = _("Sort direction array size exceeds sort key array size.") raise 
exception.InvalidInput(reason=msg) for key in default_keys: if key not in result_keys: result_keys.append(key) result_dirs.append(default_dir_value) return result_keys, result_dirs @handle_db_data_error @require_context def volume_update(context, volume_id, values): session = get_session() with session.begin(): metadata = values.get('metadata') if metadata is not None: _volume_user_metadata_update(context, volume_id, values.pop('metadata'), delete=True, session=session) admin_metadata = values.get('admin_metadata') if is_admin_context(context) and admin_metadata is not None: _volume_admin_metadata_update(context, volume_id, values.pop('admin_metadata'), delete=True, session=session) query = _volume_get_query(context, session, joined_load=False) result = query.filter_by(id=volume_id).update(values) if not result: raise exception.VolumeNotFound(volume_id=volume_id) @handle_db_data_error @require_context def volumes_update(context, values_list): session = get_session() with session.begin(): volume_refs = [] for values in values_list: volume_id = values['id'] values.pop('id') metadata = values.get('metadata') if metadata is not None: _volume_user_metadata_update(context, volume_id, values.pop('metadata'), delete=True, session=session) admin_metadata = values.get('admin_metadata') if is_admin_context(context) and admin_metadata is not None: _volume_admin_metadata_update(context, volume_id, values.pop('admin_metadata'), delete=True, session=session) volume_ref = _volume_get(context, volume_id, session=session) volume_ref.update(values) volume_refs.append(volume_ref) return volume_refs @require_context def volume_attachment_update(context, attachment_id, values): query = model_query(context, models.VolumeAttachment) result = query.filter_by(id=attachment_id).update(values) if not result: raise exception.VolumeAttachmentNotFound( filter='attachment_id = ' + attachment_id) def volume_update_status_based_on_attachment(context, volume_id): session = get_session() with 
session.begin(): volume_ref = _volume_get(context, volume_id, session=session) if not volume_ref['volume_attachment']: volume_ref.update({'status': 'available'}) else: volume_ref.update({'status': 'in-use'}) return volume_ref def volume_has_snapshots_filter(): return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted)) def volume_has_undeletable_snapshots_filter(): deletable_statuses = ['available', 'error'] return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, or_(models.Snapshot.cgsnapshot_id != None, models.Snapshot.status.notin_(deletable_statuses)), or_(models.Snapshot.group_snapshot_id != None, models.Snapshot.status.notin_(deletable_statuses)))) def volume_has_snapshots_in_a_cgsnapshot_filter(): return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, models.Snapshot.cgsnapshot_id.isnot(None))) def volume_has_attachments_filter(): return sql.exists().where( and_(models.Volume.id == models.VolumeAttachment.volume_id, models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED, ~models.VolumeAttachment.deleted)) def volume_qos_allows_retype(new_vol_type): q = sql.select([models.VolumeType.qos_specs_id]).where(and_( ~models.VolumeType.deleted, models.VolumeType.id == new_vol_type)) return or_( models.Volume.status == 'available', sql.exists().where(and_( ~models.VolumeType.deleted, models.VolumeType.id == models.Volume.volume_type_id, models.VolumeType.qos_specs_id == q.as_scalar())), and_( ~sql.exists().where(and_( ~models.VolumeType.deleted, models.VolumeType.id == models.Volume.volume_type_id, (models.VolumeType.qos_specs_id == models.QualityOfServiceSpecs.specs_id), models.QualityOfServiceSpecs.key == 'consumer', models.QualityOfServiceSpecs.value != 'back-end')), ~sql.exists().where(and_( ~models.VolumeType.deleted, models.VolumeType.id == new_vol_type, (models.VolumeType.qos_specs_id == 
models.QualityOfServiceSpecs.specs_id), models.QualityOfServiceSpecs.key == 'consumer', models.QualityOfServiceSpecs.value != 'back-end')))) def volume_has_other_project_snp_filter(): return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, models.Volume.project_id != models.Snapshot.project_id)) id) def _volume_x_metadata_get(context, volume_id, model, session=None): rows = _volume_x_metadata_get_query(context, volume_id, model, session=session).all() result = {} for row in rows: result[row['key']] = row['value'] return result def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, session=None): result = _volume_x_metadata_get_query(context, volume_id, model, session=session).\ filter_by(key=key).\ first() if not result: if model is models.VolumeGlanceMetadata: raise notfound_exec(id=volume_id) else: raise notfound_exec(metadata_key=key, volume_id=volume_id) return result def _volume_x_metadata_update(context, volume_id, metadata, delete, model, session=None, add=True, update=True): session = session or get_session() metadata = metadata.copy() with session.begin(subtransactions=True): if delete: expected_values = {'volume_id': volume_id} if metadata: expected_values['key'] = db.Not(metadata.keys()) conditional_update(context, model, {'deleted': True, 'deleted_at': timeutils.utcnow()}, expected_values) # Get existing metadata db_meta = _volume_x_metadata_get_query(context, volume_id, model).all() save = [] skip = [] # We only want to send changed metadata. 
for row in db_meta: if row.key in metadata: value = metadata.pop(row.key) if row.value != value and update: # ORM objects will not be saved until we do the bulk save row.value = value save.append(row) continue skip.append(row) # We also want to save non-existent metadata if add: save.extend(model(key=key, value=value, volume_id=volume_id) for key, value in metadata.items()) # Do a bulk save if save: session.bulk_save_objects(save, update_changed_only=True) # Construct result dictionary with current metadata save.extend(skip) result = {row['key']: row['value'] for row in save} return result def _volume_user_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeMetadata, session=session) def _volume_image_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeGlanceMetadata, session=session) @require_context def _volume_user_metadata_get(context, volume_id, session=None): return _volume_x_metadata_get(context, volume_id, models.VolumeMetadata, session=session) @require_context def _volume_user_metadata_get_item(context, volume_id, key, session=None): return _volume_x_metadata_get_item(context, volume_id, key, models.VolumeMetadata, exception.VolumeMetadataNotFound, session=session) @require_context @require_volume_exists def _volume_user_metadata_update(context, volume_id, metadata, delete, session=None): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeMetadata, session=session) @require_context @require_volume_exists def _volume_image_metadata_update(context, volume_id, metadata, delete, session=None): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeGlanceMetadata, session=session) @require_context def _volume_glance_metadata_key_to_id(context, volume_id, key): db_data = volume_glance_metadata_get(context, volume_id) metadata = {meta_entry.key: meta_entry.id 
for meta_entry in db_data if meta_entry.key == key} metadata_id = metadata[key] return metadata_id @require_context @require_volume_exists def volume_metadata_get(context, volume_id): return _volume_user_metadata_get(context, volume_id) @require_context @require_volume_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_metadata_delete(context, volume_id, key, meta_type): if meta_type == common.METADATA_TYPES.user: (_volume_user_metadata_get_query(context, volume_id). filter_by(key=key). update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})) elif meta_type == common.METADATA_TYPES.image: metadata_id = _volume_glance_metadata_key_to_id(context, volume_id, key) (_volume_image_metadata_get_query(context, volume_id). filter_by(id=metadata_id). update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})) else: raise exception.InvalidMetadataType(metadata_type=meta_type, id=volume_id) @require_context @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_metadata_update(context, volume_id, metadata, delete, meta_type): if meta_type == common.METADATA_TYPES.user: return _volume_user_metadata_update(context, volume_id, metadata, delete) elif meta_type == common.METADATA_TYPES.image: return _volume_image_metadata_update(context, volume_id, metadata, delete) else: raise exception.InvalidMetadataType(metadata_type=meta_type, id=volume_id) ################### def _volume_admin_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeAdminMetadata, session=session) @require_admin_context @require_volume_exists def _volume_admin_metadata_get(context, volume_id, session=None): return _volume_x_metadata_get(context, volume_id, models.VolumeAdminMetadata, session=session) @require_admin_context @require_volume_exists def 
_volume_admin_metadata_update(context, volume_id, metadata, delete, session=None, add=True, update=True): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeAdminMetadata, session=session, add=add, update=update) @require_admin_context def volume_admin_metadata_get(context, volume_id): return _volume_admin_metadata_get(context, volume_id) @require_admin_context @require_volume_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_admin_metadata_delete(context, volume_id, key): _volume_admin_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_admin_metadata_update(context, volume_id, metadata, delete, add=True, update=True): return _volume_admin_metadata_update(context, volume_id, metadata, delete, add=add, update=update) ################### @require_context @handle_db_data_error def snapshot_create(context, values): values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), models.SnapshotMetadata) if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session.add(snapshot_ref) return _snapshot_get(context, values['id'], session=session) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def snapshot_destroy(context, snapshot_id): utcnow = timeutils.utcnow() session = get_session() with session.begin(): updated_values = {'status': 'deleted', 'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')} model_query(context, models.Snapshot, session=session).\ filter_by(id=snapshot_id).\ update(updated_values) model_query(context, models.SnapshotMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ 
update({'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) del updated_values['updated_at'] return updated_values @require_context def _snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_context def snapshot_get(context, snapshot_id): return _snapshot_get(context, snapshot_id) @require_admin_context def snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): if filters and not is_valid_model_filters( models.Snapshot, filters, exclude_list=('host', 'cluster_name', 'availability_zone')): return [] session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot) # No snapshots would match, return empty list if not query: return [] return query.all() def _snaps_get_query(context, session=None, project_only=False): return model_query(context, models.Snapshot, session=session, project_only=project_only).\ options(joinedload('snapshot_metadata')) @apply_like_filters(model=models.Snapshot) def _process_snaps_filters(query, filters): if filters: filters = filters.copy() exclude_list = ('host', 'cluster_name', 'availability_zone') # Ensure that filters' keys exist on the model or is metadata for key in filters.keys(): if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("Metadata filter value is not valid dictionary") return None continue if key in exclude_list: continue try: column_attr = getattr(models.Snapshot, key) prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug( "'%s' key is not valid, it maps to a relationship.", key) return None 
except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None host = filters.pop('host', None) cluster = filters.pop('cluster_name', None) az = filters.pop('availability_zone', None) if host or cluster or az: query = query.join(models.Snapshot.volume) vol_field = models.Volume if host: query = query.filter(_filter_host(vol_field.host, host)) if cluster: query = query.filter(_filter_host(vol_field.cluster_name, cluster)) if az: query = query.filter_by(availability_zone=az) filters_dict = {} LOG.debug("Building query based on filter") for key, value in filters.items(): if key == 'metadata': col_attr = getattr(models.Snapshot, 'snapshot_metadata') for k, v in value.items(): query = query.filter(col_attr.any(key=k, value=v)) else: filters_dict[key] = value if filters_dict: query = query.filter_by(**filters_dict) return query @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_latest_for_volume(context, volume_id): result = model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).\ options(joinedload('snapshot_metadata')).\ order_by(desc(models.Snapshot.created_at)).\ first() if not result: raise exception.VolumeSnapshotNotFound(volume_id=volume_id) return result @require_context def snapshot_get_all_by_host(context, host, filters=None): if filters and not is_valid_model_filters(models.Snapshot, filters): return [] query = model_query(context, models.Snapshot, read_deleted='no', project_only=True) if filters: query = query.filter_by(**filters) if host and isinstance(host, six.string_types): session = get_session() with session.begin(): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] query = 
query.join(models.Snapshot.volume).filter( or_(*conditions)).options(joinedload('snapshot_metadata')) return query.all() elif not host: return [] @require_context def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(cgsnapshot_id=cgsnapshot_id).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(group_snapshot_id=group_snapshot_id).\ options(joinedload('volume')).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): if filters and not is_valid_model_filters( models.Snapshot, filters, exclude_list=('host', 'cluster_name', 'availability_zone')): return [] authorize_project_context(context, project_id) filters = filters.copy() if filters else {} filters['project_id'] = project_id session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot) if not query: return [] query = query.options(joinedload('snapshot_metadata')) return query.all() @require_context def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, session=None, host=None): authorize_project_context(context, project_id) query = model_query(context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no", session=session) if volume_type_id or host: query = query.join('volume') if volume_type_id: query = query.filter( models.Volume.volume_type_id == volume_type_id) if host: query = query.filter(_filter_host(models.Volume.host, host)) result = query.filter(models.Snapshot.project_id == 
project_id).first() return (result[0] or 0, result[1] or 0) @require_context def snapshot_data_get_for_project(context, project_id, volume_type_id=None, host=None): return _snapshot_data_get_for_project(context, project_id, volume_type_id, host=host) @require_context def snapshot_get_all_active_by_window(context, begin, end=None, project_id=None): query = model_query(context, models.Snapshot, read_deleted="yes") query = query.filter(or_(models.Snapshot.deleted_at == None, models.Snapshot.deleted_at > begin)) query = query.options(joinedload(models.Snapshot.volume)) query = query.options(joinedload('snapshot_metadata')) if end: query = query.filter(models.Snapshot.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @handle_db_data_error @require_context def snapshot_update(context, snapshot_id, values): query = model_query(context, models.Snapshot, project_only=True) result = query.filter_by(id=snapshot_id).update(values) if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) @require_context def get_snapshot_summary(context, project_only, filters=None): if not (project_only or is_admin_context(context)): raise exception.AdminRequired() query = model_query(context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no") if project_only: query = query.filter_by(project_id=context.project_id) if filters: query = _process_snaps_filters(query, filters) if query is None: return [] result = query.first() return result[0] or 0, result[1] or 0 filter_by(snapshot_id=snapshot_id) @require_context def _snapshot_metadata_get(context, snapshot_id, session=None): rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_snapshot_exists def snapshot_metadata_get(context, snapshot_id): return _snapshot_metadata_get(context, snapshot_id) @require_context 
@require_snapshot_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def snapshot_metadata_delete(context, snapshot_id, key): _snapshot_metadata_get_query(context, snapshot_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): result = _snapshot_metadata_get_query(context, snapshot_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.SnapshotMetadataNotFound(metadata_key=key, snapshot_id=snapshot_id) return result @require_context @require_snapshot_exists @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def snapshot_metadata_update(context, snapshot_id, metadata, delete): session = get_session() with session.begin(): if delete: original_metadata = _snapshot_metadata_get(context, snapshot_id, session) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) meta_ref.update({'deleted': True, 'deleted_at': timeutils.utcnow()}) meta_ref.save(session=session) meta_ref = None for meta_key, meta_value in metadata.items(): item = {"value": meta_value} try: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) except exception.SnapshotMetadataNotFound: meta_ref = models.SnapshotMetadata() item.update({"key": meta_key, "snapshot_id": snapshot_id}) meta_ref.update(item) meta_ref.save(session=session) return snapshot_metadata_get(context, snapshot_id) )) projects = projects or [] orm_projects = [] session = get_session() with session.begin(): try: _volume_type_get_by_name(context, values['name'], session) raise exception.VolumeTypeExists(id=values['name']) except exception.VolumeTypeNotFoundByName: pass try: _volume_type_get(context, values['id'], session) raise 
exception.VolumeTypeExists(id=values['id']) except exception.VolumeTypeNotFound: pass try: values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeType() volume_type_ref.update(values) session.add(volume_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.VolumeTypeProjects() access_ref.update({"volume_type_id": volume_type_ref.id, "project_id": project}) access_ref.save(session=session) orm_projects.append(access_ref) volume_type_ref.projects = orm_projects return volume_type_ref @handle_db_data_error @require_admin_context def group_type_create(context, values, projects=None): if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) projects = projects or [] session = get_session() with session.begin(): try: _group_type_get_by_name(context, values['name'], session) raise exception.GroupTypeExists(id=values['name']) except exception.GroupTypeNotFoundByName: pass try: _group_type_get(context, values['id'], session) raise exception.GroupTypeExists(id=values['id']) except exception.GroupTypeNotFound: pass try: values['group_specs'] = _metadata_refs(values.get('group_specs'), models.GroupTypeSpecs) group_type_ref = models.GroupType() group_type_ref.update(values) session.add(group_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.GroupTypeProjects() access_ref.update({"group_type_id": group_type_ref.id, "project_id": project}) access_ref.save(session=session) return group_type_ref def _volume_type_get_query(context, session=None, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query(context, models.VolumeType, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) for expected in expected_fields: query = query.options(joinedload(expected)) if not context.is_admin: the_filter = [models.VolumeType.is_public 
== true()] projects_attr = getattr(models.VolumeType, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query def _group_type_get_query(context, session=None, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query(context, models.GroupType, session=session, read_deleted=read_deleted).\ options(joinedload('group_specs')) if 'projects' in expected_fields: query = query.options(joinedload('projects')) if not context.is_admin: the_filter = [models.GroupType.is_public == true()] projects_attr = models.GroupType.projects the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query def _process_volume_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.VolumeType.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.VolumeType, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: if not is_valid_model_filters(models.VolumeType, filters): return if filters.get('extra_specs') is not None: the_filter = [] searchdict = filters.pop('extra_specs') extra_specs = getattr(models.VolumeType, 'extra_specs') for k, v in searchdict.items(): # NOTE(tommylikehu): We will use 'LIKE' operator for # 'availability_zones' extra spec as it always store the # AZ list info within the format: "az1, az2,...." 
if k == 'RESKEY:availability_zones': the_filter.extend([extra_specs.any( models.VolumeTypeExtraSpecs.value.like(u'%%%s%%' % v), key=k, deleted=False)]) else: the_filter.extend( [extra_specs.any(key=k, value=v, deleted=False)]) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) query = query.filter_by(**filters) return query def _process_group_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.GroupType.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.GroupType, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id, deleted=False) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.GroupType, filters): return if filters.get('group_specs') is not None: the_filter = [] searchdict = filters.pop('group_specs') group_specs = getattr(models.GroupType, 'group_specs') for k, v in searchdict.items(): the_filter.extend([group_specs.any(key=k, value=v, deleted=False)]) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) query = query.filter_by(**filters) return query @handle_db_data_error @require_admin_context def _type_update(context, type_id, values, is_group): if is_group: model = models.GroupType exists_exc = exception.GroupTypeExists else: model = models.VolumeType exists_exc = exception.VolumeTypeExists session = get_session() with session.begin(): if values['description'] is None: del values['description'] if values['is_public'] is None: del values['is_public'] if values['name'] is None: del values['name'] else: conditions = and_(model.name == values['name'], 
model.id != type_id, ~model.deleted) query = session.query(sql.exists().where(conditions)) if query.scalar(): raise exists_exc(id=values['name']) query = model_query(context, model, project_only=True, session=session) result = query.filter_by(id=type_id).update(values) if not result: if is_group: raise exception.GroupTypeNotFound(group_type_id=type_id) else: raise exception.VolumeTypeNotFound(volume_type_id=type_id) def volume_type_update(context, volume_type_id, values): _type_update(context, volume_type_id, values, is_group=False) def group_type_update(context, group_type_id, values): _type_update(context, group_type_id, values, is_group=True) @require_context def volume_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): session = get_session() with session.begin(): filters = filters or {} filters['context'] = context query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.VolumeType) if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [_dict_with_extra_specs_if_authorized(context, row) for row in rows] return result result = {row['name']: _dict_with_extra_specs_if_authorized(context, row) for row in rows} return result @require_context def group_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): session = get_session() with session.begin(): filters = filters or {} filters['context'] = context query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.GroupType) if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [_dict_with_group_specs_if_authorized(context, row) for row in rows] return result result = {row['name']: _dict_with_group_specs_if_authorized(context, row) for row in rows} 
return result def _volume_type_get_id_from_volume_type_query(context, id, session=None): return model_query( context, models.VolumeType.id, read_deleted="no", session=session, base_model=models.VolumeType).\ filter_by(id=id) def _group_type_get_id_from_group_type_query(context, id, session=None): return model_query( context, models.GroupType.id, read_deleted="no", session=session, base_model=models.GroupType).\ filter_by(id=id) def _volume_type_get_id_from_volume_type(context, id, session=None): result = _volume_type_get_id_from_volume_type_query( context, id, session=session).first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result[0] def _group_type_get_id_from_group_type(context, id, session=None): result = _group_type_get_id_from_group_type_query( context, id, session=session).first() if not result: raise exception.GroupTypeNotFound(group_type_id=id) return result[0] def _volume_type_get_db_object(context, id, session=None, inactive=False, expected_fields=None): read_deleted = "yes" if inactive else "no" result = _volume_type_get_query( context, session, read_deleted, expected_fields).\ filter_by(id=id).\ first() return result def _group_type_get_db_object(context, id, session=None, inactive=False, expected_fields=None): read_deleted = "yes" if inactive else "no" result = _group_type_get_query( context, session, read_deleted, expected_fields).\ filter_by(id=id).\ first() return result @require_context def _volume_type_get(context, id, session=None, inactive=False, expected_fields=None): expected_fields = expected_fields or [] result = _volume_type_get_db_object(context, id, session, inactive, expected_fields) if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) vtype = _dict_with_extra_specs_if_authorized(context, result) if 'projects' in expected_fields: vtype['projects'] = [p['project_id'] for p in result['projects']] if 'qos_specs' in expected_fields: vtype['qos_specs'] = result.qos_specs return vtype 
@require_context
def _group_type_get(context, id, session=None, inactive=False,
                    expected_fields=None):
    """Return a group type as a dict; 'projects' may be expanded.

    Raises GroupTypeNotFound when the id does not exist.
    """
    expected_fields = expected_fields or []
    result = _group_type_get_db_object(context, id, session, inactive,
                                       expected_fields)
    if not result:
        raise exception.GroupTypeNotFound(group_type_id=id)

    gtype = _dict_with_group_specs_if_authorized(context, result)

    if 'projects' in expected_fields:
        gtype['projects'] = [p['project_id'] for p in result['projects']]

    return gtype


@require_context
def volume_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing a specific volume type."""
    return _volume_type_get(context, id, session=None, inactive=inactive,
                            expected_fields=expected_fields)


@require_context
def group_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing a specific group type."""
    return _group_type_get(context, id, session=None, inactive=inactive,
                           expected_fields=expected_fields)


def _volume_type_get_full(context, id):
    """Return a volume type with both extra_specs and projects expanded."""
    return _volume_type_get(context, id, session=None, inactive=False,
                            expected_fields=('extra_specs', 'projects'))


def _group_type_get_full(context, id):
    """Return a group type with both group_specs and projects expanded."""
    return _group_type_get(context, id, session=None, inactive=False,
                           expected_fields=('group_specs', 'projects'))


@require_context
def _volume_type_ref_get(context, id, session=None, inactive=False):
    # Return the raw ORM row (not a dict) with extra_specs eagerly loaded.
    read_deleted = "yes" if inactive else "no"
    result = model_query(context,
                         models.VolumeType,
                         session=session,
                         read_deleted=read_deleted).\
        options(joinedload('extra_specs')).\
        filter_by(id=id).\
        first()

    if not result:
        raise exception.VolumeTypeNotFound(volume_type_id=id)

    return result


@require_context
def _group_type_ref_get(context, id, session=None, inactive=False):
    # Return the raw ORM row (not a dict) with group_specs eagerly loaded.
    read_deleted = "yes" if inactive else "no"
    result = model_query(context,
                         models.GroupType,
                         session=session,
                         read_deleted=read_deleted).\
        options(joinedload('group_specs')).\
        filter_by(id=id).\
        first()

    if not result:
        raise exception.GroupTypeNotFound(group_type_id=id)

    return result


@require_context
def _volume_type_get_by_name(context, name, session=None):
    result = model_query(context,
                         models.VolumeType,
                         session=session).\
        options(joinedload('extra_specs')).\
        filter_by(name=name).\
        first()

    if not result:
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)

    return _dict_with_extra_specs_if_authorized(context, result)


@require_context
def _group_type_get_by_name(context, name, session=None):
    result = model_query(context, models.GroupType, session=session).\
        options(joinedload('group_specs')).\
        filter_by(name=name).\
        first()

    if not result:
        raise exception.GroupTypeNotFoundByName(group_type_name=name)

    return _dict_with_group_specs_if_authorized(context, result)


@require_context
def volume_type_get_by_name(context, name):
    """Return a dict describing the volume type with the given name."""
    return _volume_type_get_by_name(context, name)


@require_context
def group_type_get_by_name(context, name):
    """Return a dict describing the group type with the given name."""
    return _group_type_get_by_name(context, name)


@require_context
def volume_types_get_by_name_or_id(context, volume_type_list):
    """Resolve a mixed list of volume type names and/or ids to dicts.

    Non-UUID entries are treated as names; UUID-like entries are tried as
    ids first and fall back to a name lookup before raising
    VolumeTypeNotFound.
    """
    req_volume_types = []
    for vol_t in volume_type_list:
        if not uuidutils.is_uuid_like(vol_t):
            vol_type = _volume_type_get_by_name(context, vol_t)
        else:
            try:
                vol_type = _volume_type_get(context, vol_t)
            except exception.VolumeTypeNotFound:
                # A UUID-shaped string may still be a (oddly chosen) name.
                try:
                    vol_type = _volume_type_get_by_name(context, vol_t)
                except exception.VolumeTypeNotFoundByName:
                    raise exception.VolumeTypeNotFound(volume_type_id=vol_t)
        req_volume_types.append(vol_type)
    return req_volume_types


@require_context
def group_types_get_by_name_or_id(context, group_type_list):
    """Resolve a mixed list of group type names and/or ids to dicts."""
    req_group_types = []
    for grp_t in group_type_list:
        if not uuidutils.is_uuid_like(grp_t):
            grp_type = _group_type_get_by_name(context, grp_t)
        else:
            grp_type = _group_type_get(context, grp_t)
        req_group_types.append(grp_type)
    return req_group_types


@require_admin_context
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
    """Return the volume types associated with the given QoS specs."""
    read_deleted = "yes" if inactive else "no"
    # Validate the QoS specs id before querying associations.
    if not resource_exists(context, models.QualityOfServiceSpecs,
                           qos_specs_id):
        raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
    vts = (model_query(context, models.VolumeType,
                       read_deleted=read_deleted).
           options(joinedload('extra_specs')).
           options(joinedload('projects')).
           filter_by(qos_specs_id=qos_specs_id).all())
    return vts


@require_admin_context
def volume_type_qos_associate(context, type_id, qos_specs_id):
    """Point the given volume type at the given QoS specs."""
    session = get_session()
    with session.begin():
        # Raises VolumeTypeNotFound when the type does not exist.
        _volume_type_get(context, type_id, session)

        session.query(models.VolumeType). \
            filter_by(id=type_id). \
            update({'qos_specs_id': qos_specs_id,
                    'updated_at': timeutils.utcnow()})


@require_admin_context
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
    """Clear the QoS association for one volume type."""
    session = get_session()
    with session.begin():
        _volume_type_get(context, type_id, session)

        session.query(models.VolumeType). \
            filter_by(id=type_id). \
            filter_by(qos_specs_id=qos_specs_id). \
            update({'qos_specs_id': None,
                    'updated_at': timeutils.utcnow()})


@require_admin_context
def volume_type_qos_disassociate_all(context, qos_specs_id):
    """Clear the QoS association for every volume type using these specs."""
    session = get_session()
    with session.begin():
        session.query(models.VolumeType). \
            filter_by(qos_specs_id=qos_specs_id). \
            update({'qos_specs_id': None,
                    'updated_at': timeutils.utcnow()})


@require_admin_context
def volume_type_qos_specs_get(context, type_id):
    """Return the QoS specs dict for a volume type (None when unset)."""
    session = get_session()
    with session.begin():
        _volume_type_get(context, type_id, session)

        row = session.query(models.VolumeType). \
            options(joinedload('qos_specs')). \
            filter_by(id=type_id). \
            first()

        # _dict_with_qos_specs returns a list; the type has at most one.
        specs = _dict_with_qos_specs(row.qos_specs)

        if not specs:
            specs = None
        else:
            specs = specs[0]

        return {'qos_specs': specs}


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_type_destroy(context, id):
    """Soft-delete a volume type and its specs/encryption/project rows.

    Raises VolumeTypeInUse when any volume, group mapping or consistency
    group still references the type.
    """
    utcnow = timeutils.utcnow()
    session = get_session()
    with session.begin():
        _volume_type_get(context, id, session)
        results = model_query(context, models.Volume, session=session). \
            filter_by(volume_type_id=id).all()
        group_count = model_query(context,
                                  models.GroupVolumeTypeMapping,
                                  read_deleted="no",
                                  session=session).\
            filter_by(volume_type_id=id).count()
        cg_count = model_query(context, models.ConsistencyGroup,
                               session=session).filter(
            models.ConsistencyGroup.volume_type_id.contains(id)).count()
        if results or group_count or cg_count:
            LOG.error('VolumeType %s deletion failed, '
                      'VolumeType in use.', id)
            raise exception.VolumeTypeInUse(volume_type_id=id)
        updated_values = {'deleted': True,
                          'deleted_at': utcnow,
                          'updated_at': literal_column('updated_at')}
        model_query(context, models.VolumeType, session=session).\
            filter_by(id=id).\
            update(updated_values)
        model_query(context, models.VolumeTypeExtraSpecs, session=session).\
            filter_by(volume_type_id=id).\
            update({'deleted': True,
                    'deleted_at': utcnow,
                    'updated_at': literal_column('updated_at')})
        model_query(context, models.Encryption, session=session).\
            filter_by(volume_type_id=id).\
            update({'deleted': True,
                    'deleted_at': utcnow,
                    'updated_at': literal_column('updated_at')})
        model_query(context, models.VolumeTypeProjects, session=session,
                    read_deleted="int_no").filter_by(
            volume_type_id=id).soft_delete(synchronize_session=False)
    # Callers get the values applied, minus the SQL literal_column marker.
    del updated_values['updated_at']
    return updated_values


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def group_type_destroy(context, id):
    """Soft-delete a group type and its specs rows.

    Raises GroupTypeInUse when any group still references the type.
    """
    session = get_session()
    with session.begin():
        _group_type_get(context, id, session)
        results = model_query(context, models.Group, session=session). \
            filter_by(group_type_id=id).all()
        if results:
            LOG.error('GroupType %s deletion failed, '
                      'GroupType in use.', id)
            raise exception.GroupTypeInUse(group_type_id=id)
        model_query(context, models.GroupType, session=session).\
            filter_by(id=id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        model_query(context, models.GroupTypeSpecs, session=session).\
            filter_by(group_type_id=id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def volume_get_all_active_by_window(context, begin, end=None,
                                    project_id=None):
    """Return volumes that were active (not yet deleted) inside a window.

    Includes soft-deleted rows whose deleted_at falls after `begin`.
    """
    query = model_query(context, models.Volume, read_deleted="yes")
    query = query.filter(or_(models.Volume.deleted_at == None,  # noqa
                             models.Volume.deleted_at > begin))
    if end:
        query = query.filter(models.Volume.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    query = (query.options(joinedload('volume_metadata')).
             options(joinedload('volume_type')).
             options(joinedload('volume_attachment')).
             options(joinedload('consistencygroup')).
options(joinedload('group')))
    if is_admin_context(context):
        query = query.options(joinedload('volume_admin_metadata'))
    return query.all()


def _volume_type_access_query(context, session=None):
    # "int_no" read_deleted mode: see model_query for its exact semantics.
    return model_query(context, models.VolumeTypeProjects, session=session,
                       read_deleted="int_no")


def _group_type_access_query(context, session=None):
    return model_query(context, models.GroupTypeProjects, session=session,
                       read_deleted="no")


@require_admin_context
def volume_type_access_get_all(context, type_id):
    """Return the project access rows for a volume type."""
    volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
    return _volume_type_access_query(context).\
        filter_by(volume_type_id=volume_type_id).all()


@require_admin_context
def group_type_access_get_all(context, type_id):
    """Return the project access rows for a group type."""
    group_type_id = _group_type_get_id_from_group_type(context, type_id)
    return _group_type_access_query(context).\
        filter_by(group_type_id=group_type_id).all()


def _group_volume_type_mapping_query(context, session=None):
    return model_query(context, models.GroupVolumeTypeMapping,
                       session=session, read_deleted="no")


@require_admin_context
def volume_type_get_all_by_group(context, group_id):
    """Return the volume types mapped to the given group."""
    mappings = (_group_volume_type_mapping_query(context).
                filter_by(group_id=group_id).all())
    session = get_session()
    with session.begin():
        volume_type_ids = [mapping.volume_type_id for mapping in mappings]
        query = (model_query(context,
                             models.VolumeType,
                             session=session,
                             read_deleted='no').
                 filter(models.VolumeType.id.in_(volume_type_ids)).
                 options(joinedload('extra_specs')).
                 options(joinedload('projects')).
                 all())
        return query


def _group_volume_type_mapping_get_all_by_group_volume_type(context, group_id,
                                                            volume_type_id):
    # Mapping rows for one (group, volume type) pair.
    mappings = _group_volume_type_mapping_query(context).\
        filter_by(group_id=group_id).\
        filter_by(volume_type_id=volume_type_id).all()
    return mappings


@require_admin_context
def volume_type_access_add(context, type_id, project_id):
    """Grant a project access to a volume type.

    Raises VolumeTypeAccessExists when the grant already exists.
    """
    volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)

    access_ref = models.VolumeTypeProjects()
    access_ref.update({"volume_type_id": volume_type_id,
                       "project_id": project_id})

    session = get_session()
    with session.begin():
        try:
            access_ref.save(session=session)
        except db_exc.DBDuplicateEntry:
            raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
                                                   project_id=project_id)
        return access_ref


@require_admin_context
def group_type_access_add(context, type_id, project_id):
    """Grant a project access to a group type.

    Raises GroupTypeAccessExists when the grant already exists.
    """
    group_type_id = _group_type_get_id_from_group_type(context, type_id)

    access_ref = models.GroupTypeProjects()
    access_ref.update({"group_type_id": group_type_id,
                       "project_id": project_id})

    session = get_session()
    with session.begin():
        try:
            access_ref.save(session=session)
        except db_exc.DBDuplicateEntry:
            raise exception.GroupTypeAccessExists(group_type_id=type_id,
                                                  project_id=project_id)
        return access_ref


@require_admin_context
def volume_type_access_remove(context, type_id, project_id):
    """Revoke a project's access to a volume type.

    Raises VolumeTypeAccessNotFound when no grant exists.
    """
    volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)

    count = (_volume_type_access_query(context).
             filter_by(volume_type_id=volume_type_id).
             filter_by(project_id=project_id).
             soft_delete(synchronize_session=False))
    if count == 0:
        raise exception.VolumeTypeAccessNotFound(
            volume_type_id=type_id, project_id=project_id)


@require_admin_context
def group_type_access_remove(context, type_id, project_id):
    """Revoke a project's access to a group type.

    Raises GroupTypeAccessNotFound when no grant exists.
    """
    group_type_id = _group_type_get_id_from_group_type(context, type_id)

    count = (_group_type_access_query(context).
             filter_by(group_type_id=group_type_id).
             filter_by(project_id=project_id).
             soft_delete(synchronize_session=False))
    if count == 0:
        raise exception.GroupTypeAccessNotFound(
            group_type_id=type_id, project_id=project_id)


# NOTE(review): the "def _volume_type_extra_specs_query(...)" header was lost
# during extraction of this file; the orphan fragment below is the tail of
# that helper, kept verbatim. Restore the header from the original source.
d="no").\
        filter_by(volume_type_id=volume_type_id)


@require_context
def volume_type_extra_specs_get(context, volume_type_id):
    """Return the extra specs of a volume type as a {key: value} dict."""
    rows = _volume_type_extra_specs_query(context, volume_type_id).\
        all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Soft-delete one extra spec; raises if the key does not exist."""
    session = get_session()
    with session.begin():
        # Raises VolumeTypeExtraSpecsNotFound for an unknown key.
        _volume_type_extra_specs_get_item(context, volume_type_id, key,
                                          session)
        _volume_type_extra_specs_query(context, volume_type_id, session).\
            filter_by(key=key).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def _volume_type_extra_specs_get_item(context, volume_type_id, key,
                                      session=None):
    result = _volume_type_extra_specs_query(
        context, volume_type_id, session=session).\
        filter_by(key=key).\
        first()

    if not result:
        raise exception.VolumeTypeExtraSpecsNotFound(
            extra_specs_key=key,
            volume_type_id=volume_type_id)

    return result


@handle_db_data_error
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
                                             specs):
    """Upsert extra specs for a volume type; returns the input specs."""
    session = get_session()
    with session.begin():
        spec_ref = None
        for key, value in specs.items():
            try:
                spec_ref = _volume_type_extra_specs_get_item(
                    context, volume_type_id, key, session)
            except exception.VolumeTypeExtraSpecsNotFound:
                spec_ref = models.VolumeTypeExtraSpecs()
            spec_ref.update({"key": key, "value": value,
                             "volume_type_id": volume_type_id,
                             "deleted": False})
            spec_ref.save(session=session)

        return specs


# NOTE(review): the "def _group_type_specs_query(...)" header was lost during
# extraction; the orphan fragment below is its tail, kept verbatim.
filter_by(group_type_id=group_type_id)


@require_context
def group_type_specs_get(context, group_type_id):
    """Return the specs of a group type as a {key: value} dict."""
    rows = _group_type_specs_query(context, group_type_id).\
        all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def group_type_specs_delete(context, group_type_id,
key): session = get_session() with session.begin(): _group_type_specs_get_item(context, group_type_id, key, session) _group_type_specs_query(context, group_type_id, session).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _group_type_specs_get_item(context, group_type_id, key, session=None): result = _group_type_specs_query( context, group_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.GroupTypeSpecsNotFound( group_specs_key=key, group_type_id=group_type_id) return result @handle_db_data_error @require_context def group_type_specs_update_or_create(context, group_type_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value in specs.items(): try: spec_ref = _group_type_specs_get_item( context, group_type_id, key, session) except exception.GroupTypeSpecsNotFound: spec_ref = models.GroupTypeSpecs() spec_ref.update({"key": key, "value": value, "group_type_id": group_type_id, "deleted": False}) spec_ref.save(session=session) return specs t_all_by_name(context, values['name'], session) raise exception.QoSSpecsExists(specs_id=values['name']) except exception.QoSSpecsNotFound: pass try: specs_root = models.QualityOfServiceSpecs() root = dict(id=specs_id) root['key'] = 'QoS_Specs_Name' root['value'] = values['name'] LOG.debug("DB qos_specs_create(): root %s", root) specs_root.update(root) specs_root.save(session=session) consumer = {'key': 'consumer', 'value': values['consumer'], 'specs_id': specs_id, 'id': six.text_type(uuid.uuid4())} cons_entry = models.QualityOfServiceSpecs() cons_entry.update(consumer) cons_entry.save(session=session) for k, v in values.get('specs', {}).items(): item = dict(key=k, value=v, specs_id=specs_id) item['id'] = str(uuid.uuid4()) spec_entry = models.QualityOfServiceSpecs() spec_entry.update(item) spec_entry.save(session=session) except db_exc.DBDataError: msg = _('Error 
writing field to database')
            LOG.exception(msg)
            raise exception.Invalid(msg)
        except Exception as e:
            raise db_exc.DBError(e)

        return dict(id=specs_root.id, name=specs_root.value)


@require_admin_context
def _qos_specs_get_all_by_name(context, name, session=None, inactive=False):
    """Return the root QoS-spec rows matching a name, specs eager-loaded."""
    read_deleted = 'yes' if inactive else 'no'
    results = model_query(context, models.QualityOfServiceSpecs,
                          read_deleted=read_deleted, session=session). \
        filter_by(key='QoS_Specs_Name'). \
        filter_by(value=name). \
        options(joinedload('specs')).all()

    if not results:
        raise exception.QoSSpecsNotFound(specs_id=name)

    return results


@require_admin_context
def _qos_specs_get_all_ref(context, qos_specs_id, session=None,
                           inactive=False):
    """Return QoS-spec rows by id, child specs eager-loaded."""
    read_deleted = 'yes' if inactive else 'no'
    # NOTE(review): joinedload_all is deprecated in newer SQLAlchemy
    # (replaced by chained joinedload) — confirm the pinned version.
    result = model_query(context, models.QualityOfServiceSpecs,
                         read_deleted=read_deleted, session=session). \
        filter_by(id=qos_specs_id). \
        options(joinedload_all('specs')).all()

    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)

    return result


def _dict_with_children_specs(specs):
    """Flatten non-deleted child spec rows into a {key: value} dict.

    Also carries the most recent child 'updated_at' into the dict.
    """
    result = {}
    update_time = None
    for spec in specs:
        if not spec['deleted']:
            # Track the newest updated_at across the children.
            if not update_time and spec['updated_at']:
                update_time = spec['updated_at']
            elif update_time and spec['updated_at']:
                if (update_time - spec['updated_at']).total_seconds() < 0:
                    update_time = spec['updated_at']
            result.update({spec['key']: spec['value']})
    if update_time:
        result.update({'updated_at': update_time})
    return result


def _dict_with_qos_specs(rows):
    """Convert root QoS-spec rows into a list of API-shaped dicts.

    Each entry carries name/id/created_at, the 'consumer' child spec
    promoted to a top-level key, and the remaining children under 'specs'.
    """
    result = []
    for row in rows:
        if row['key'] == 'QoS_Specs_Name':
            member = {'name': row['value'], 'id': row['id'],
                      'created_at': row['created_at']}
            if row.specs:
                spec_dict = _dict_with_children_specs(row.specs)
                member['consumer'] = spec_dict.pop('consumer')
                if spec_dict.get('updated_at'):
                    member['updated_at'] = spec_dict.pop('updated_at')
                member.update(dict(specs=spec_dict))
            result.append(member)
    return result


@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
    """Return one QoS specs entry as a dict by id."""
    rows = _qos_specs_get_all_ref(context, qos_specs_id, None, inactive)

    return _dict_with_qos_specs(rows)[0]


@require_admin_context
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
                      offset=None, sort_keys=None, sort_dirs=None):
    """Return all QoS specs matching the paginate/filter arguments."""
    session = get_session()
    with session.begin():
        # Generate the query
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset, models.QualityOfServiceSpecs)
        # No QoS specs would match, return empty list
        if query is None:
            return []
        rows = query.all()
        return _dict_with_qos_specs(rows)


@require_admin_context
def _qos_specs_get_query(context, session):
    rows = model_query(context, models.QualityOfServiceSpecs,
                       session=session,
                       read_deleted='no').\
        options(joinedload_all('specs')).filter_by(key='QoS_Specs_Name')
    return rows


def _process_qos_specs_filters(query, filters):
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.QualityOfServiceSpecs, filters):
            return
        query = query.filter_by(**filters)
    return query


@require_admin_context
def _qos_specs_get(context, qos_spec_id, session=None):
    result = model_query(context, models.QualityOfServiceSpecs,
                         session=session,
                         read_deleted='no').\
        filter_by(id=qos_spec_id).filter_by(key='QoS_Specs_Name').first()

    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_spec_id)

    return result


@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
    """Return one QoS specs entry as a dict by name."""
    rows = _qos_specs_get_all_by_name(context, name, None, inactive)

    return _dict_with_qos_specs(rows)[0]


@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
    """Return the volume types associated with these QoS specs."""
    # Currently only volume types are associated with QoS specs; delegate.
    return volume_type_qos_associations_get(context, qos_specs_id)


@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
    """Associate a volume type with the given QoS specs."""
    return volume_type_qos_associate(context, type_id, qos_specs_id)


@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
    """Disassociate a volume type from the given QoS specs."""
    return volume_type_qos_disassociate(context, qos_specs_id, type_id)


@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
    """Disassociate every volume type from the given QoS specs."""
    return volume_type_qos_disassociate_all(context, qos_specs_id)


@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
    """Soft-delete a single key from a QoS specs entry."""
    session = get_session()
    with session.begin():
        session.query(models.QualityOfServiceSpecs). \
            filter(models.QualityOfServiceSpecs.key == key). \
            filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_admin_context
def qos_specs_delete(context, qos_specs_id):
    """Soft-delete a QoS specs root row and all of its children."""
    session = get_session()
    with session.begin():
        # Raises QoSSpecsNotFound for an unknown id.
        _qos_specs_get_all_ref(context, qos_specs_id, session)
        updated_values = {'deleted': True,
                          'deleted_at': timeutils.utcnow(),
                          'updated_at': literal_column('updated_at')}
        # Match both the root row (id) and its children (specs_id).
        session.query(models.QualityOfServiceSpecs).\
            filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
                       models.QualityOfServiceSpecs.specs_id ==
                       qos_specs_id)).\
            update(updated_values)
    del updated_values['updated_at']
    return updated_values


@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
    result = model_query(context, models.QualityOfServiceSpecs,
                         session=session). \
        filter(models.QualityOfServiceSpecs.key == key). \
        filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id).
\ first() if not result: raise exception.QoSSpecsKeyNotFound( specs_key=key, specs_id=qos_specs_id) return result @handle_db_data_error @require_admin_context def qos_specs_update(context, qos_specs_id, updates): session = get_session() with session.begin(): # make sure qos specs exists exists = resource_exists(context, models.QualityOfServiceSpecs, qos_specs_id, session) if not exists: raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) specs = updates.get('specs', {}) if 'consumer' in updates: # Massage consumer to the right place for DB and copy specs # before updating so we don't modify dict for caller specs = specs.copy() specs['consumer'] = updates['consumer'] spec_ref = None for key in specs.keys(): try: spec_ref = _qos_specs_get_item( context, qos_specs_id, key, session) except exception.QoSSpecsKeyNotFound: spec_ref = models.QualityOfServiceSpecs() id = None if spec_ref.get('id', None): id = spec_ref['id'] else: id = str(uuid.uuid4()) value = dict(id=id, key=key, value=specs[key], specs_id=qos_specs_id, deleted=False) LOG.debug('qos_specs_update() value: %s', value) spec_ref.update(value) spec_ref.save(session=session) return specs eted="no").\ filter_by(volume_type_id=volume_type_id).first() @require_admin_context def volume_type_encryption_delete(context, volume_type_id): session = get_session() with session.begin(): encryption = volume_type_encryption_get(context, volume_type_id, session) if not encryption: raise exception.VolumeTypeEncryptionNotFound( type_id=volume_type_id) encryption.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @handle_db_data_error @require_admin_context def volume_type_encryption_create(context, volume_type_id, values): session = get_session() with session.begin(): encryption = models.Encryption() if 'volume_type_id' not in values: values['volume_type_id'] = volume_type_id if 'encryption_id' not in values: values['encryption_id'] = six.text_type(uuid.uuid4()) 
encryption.update(values) session.add(encryption) return encryption @handle_db_data_error @require_admin_context def volume_type_encryption_update(context, volume_type_id, values): query = model_query(context, models.Encryption) result = query.filter_by(volume_type_id=volume_type_id).update(values) if not result: raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id) def volume_type_encryption_volume_get(context, volume_type_id, session=None): volume_list = _volume_get_query(context, session=session, project_only=False).\ filter_by(volume_type_id=volume_type_id).\ all() return volume_list ntext, volume_ref['volume_type_id']) values = { 'encryption_key_id': volume_ref['encryption_key_id'], } if encryption_ref: for key in ['control_location', 'cipher', 'key_size', 'provider']: values[key] = encryption_ref[key] return values session=session) if is_user_context(context): query = query.filter( models.Volume.id == models.VolumeGlanceMetadata.volume_id, models.Volume.project_id == context.project_id) return query.all() @require_context def volume_glance_metadata_get_all(context): return _volume_glance_metadata_get_all(context) @require_context def volume_glance_metadata_list_get(context, volume_id_list): query = model_query(context, models.VolumeGlanceMetadata, session=None) query = query.filter( models.VolumeGlanceMetadata.volume_id.in_(volume_id_list)) return query.all() @require_context @require_volume_exists def _volume_glance_metadata_get(context, volume_id, session=None): rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ filter_by(volume_id=volume_id).\ filter_by(deleted=False).\ all() if not rows: raise exception.GlanceMetadataNotFound(id=volume_id) return rows @require_context def volume_glance_metadata_get(context, volume_id): return _volume_glance_metadata_get(context, volume_id) @require_context @require_snapshot_exists def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): rows = 
model_query(context, models.VolumeGlanceMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ filter_by(deleted=False).\ all() if not rows: raise exception.GlanceMetadataNotFound(id=snapshot_id) return rows @require_context def volume_snapshot_glance_metadata_get(context, snapshot_id): return _volume_snapshot_glance_metadata_get(context, snapshot_id) @require_context @require_volume_exists def volume_glance_metadata_create(context, volume_id, key, value): session = get_session() with session.begin(): rows = session.query(models.VolumeGlanceMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).all() if len(rows) > 0: raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = six.text_type(value) session.add(vol_glance_metadata) return @require_context @require_volume_exists def volume_glance_metadata_bulk_create(context, volume_id, metadata): session = get_session() with session.begin(): for (key, value) in metadata.items(): rows = session.query(models.VolumeGlanceMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).all() if len(rows) > 0: raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = six.text_type(value) session.add(vol_glance_metadata) @require_context @require_snapshot_exists def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): session = get_session() with session.begin(): metadata = _volume_glance_metadata_get(context, volume_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.snapshot_id = snapshot_id vol_glance_metadata.key = meta['key'] 
vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context def volume_glance_metadata_copy_from_volume_to_volume(context, src_volume_id, volume_id): session = get_session() with session.begin(): metadata = _volume_glance_metadata_get(context, src_volume_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context @require_volume_exists def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): session = get_session() with session.begin(): metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context def volume_glance_metadata_delete_by_volume(context, volume_id): model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ filter_by(snapshot_id=snapshot_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) d=read_deleted).options( joinedload('backup_metadata')).filter_by(id=backup_id).first() if not result: raise exception.BackupNotFound(backup_id=backup_id) return result def _backup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.Backup, filters): return [] session = 
get_session()
    with session.begin():
        # Generate the paginate query
        query = _generate_paginate_query(context, session, marker,
                                         limit, sort_keys, sort_dirs, filters,
                                         offset, models.Backup)
        if query is None:
            return []
        return query.all()


def _backups_get_query(context, session=None, project_only=False):
    return model_query(
        context, models.Backup, session=session,
        project_only=project_only).options(joinedload('backup_metadata'))


@apply_like_filters(model=models.Backup)
def _process_backups_filters(query, filters):
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Backup, filters):
            return
        filters_dict = {}
        for key, value in filters.items():
            if key == 'metadata':
                # Metadata filters match against the child metadata rows.
                col_attr = getattr(models.Backup, 'backup_metadata')
                for k, v in value.items():
                    query = query.filter(col_attr.any(key=k, value=v))
            else:
                filters_dict[key] = value

        # Apply exact matches
        if filters_dict:
            query = query.filter_by(**filters_dict)
    return query


@require_admin_context
def backup_get_all(context, filters=None, marker=None, limit=None,
                   offset=None, sort_keys=None, sort_dirs=None):
    """Return all backups matching the paginate/filter arguments."""
    return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
                           sort_dirs)


@require_admin_context
def backup_get_all_by_host(context, host):
    """Return all backups whose host column matches."""
    return model_query(
        context, models.Backup).options(
        joinedload('backup_metadata')).filter_by(host=host).all()


@require_context
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
                              limit=None, offset=None, sort_keys=None,
                              sort_dirs=None):
    """Return a project's backups matching the paginate/filter arguments."""
    authorize_project_context(context, project_id)
    if not filters:
        filters = {}
    else:
        # Copy so the caller's dict is not mutated.
        filters = filters.copy()

    filters['project_id'] = project_id

    return _backup_get_all(context, filters, marker, limit, offset, sort_keys,
                           sort_dirs)


@require_context
def backup_get_all_by_volume(context, volume_id, filters=None):
    """Return all backups of the given volume."""
    authorize_project_context(context, volume_id)
    if not filters:
        filters = {}
    else:
        filters = filters.copy()

    filters['volume_id'] = volume_id

    return _backup_get_all(context, filters)


@require_context
def backup_get_all_active_by_window(context, begin, end=None,
                                    project_id=None):
    """Return backups that were active (not yet deleted) inside a window."""
    query = model_query(context, models.Backup, read_deleted="yes").options(
        joinedload('backup_metadata'))
    query = query.filter(or_(models.Backup.deleted_at == None,  # noqa
                             models.Backup.deleted_at > begin))
    if end:
        query = query.filter(models.Backup.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    return query.all()


@handle_db_data_error
@require_context
def backup_create(context, values):
    """Create a backup row (id generated when absent) and return it."""
    values['backup_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.BackupMetadata)
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    session = get_session()
    with session.begin():
        backup_ref = models.Backup()
        backup_ref.update(values)
        session.add(backup_ref)

        return _backup_get(context, values['id'], session=session)


@handle_db_data_error
@require_context
def backup_update(context, backup_id, values):
    """Update a backup row; raises BackupNotFound when absent."""
    if 'fail_reason' in values:
        # Truncate to the column width; copy so the caller's dict is kept.
        values = values.copy()
        values['fail_reason'] = (values['fail_reason'] or '')[:255]
    query = model_query(context, models.Backup, read_deleted="yes")
    result = query.filter_by(id=backup_id).update(values)
    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)


@require_admin_context
def backup_destroy(context, backup_id):
    """Soft-delete a backup and its metadata rows."""
    utcnow = timeutils.utcnow()
    updated_values = {'status': fields.BackupStatus.DELETED,
                      'deleted': True,
                      'deleted_at': utcnow,
                      'updated_at': literal_column('updated_at')}
    session = get_session()
    with session.begin():
        model_query(context, models.Backup, session=session).\
            filter_by(id=backup_id).\
            update(updated_values)
        model_query(context, models.BackupMetadata, session=session).\
            filter_by(backup_id=backup_id).\
            update({'deleted': True, 'deleted_at': utcnow,
                    'updated_at': literal_column('updated_at')})
    del updated_values['updated_at']
    return updated_values


@require_context
@require_backup_exists
def backup_metadata_get(context, backup_id):
    """Return a backup's metadata as a {key: value} dict."""
    return _backup_metadata_get(context, backup_id)


@require_context
def _backup_metadata_get(context, backup_id, session=None):
    rows = _backup_metadata_get_query(context, backup_id, session).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


def _backup_metadata_get_query(context, backup_id, session=None):
    return model_query(
        context, models.BackupMetadata,
        session=session, read_deleted="no").filter_by(backup_id=backup_id)


@require_context
def _backup_metadata_get_item(context, backup_id, key, session=None):
    result = _backup_metadata_get_query(
        context, backup_id, session=session).filter_by(key=key).first()

    if not result:
        raise exception.BackupMetadataNotFound(metadata_key=key,
                                               backup_id=backup_id)
    return result


@require_context
@require_backup_exists
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def backup_metadata_update(context, backup_id, metadata, delete):
    """Upsert backup metadata; delete=True removes keys not in `metadata`."""
    session = get_session()
    with session.begin():
        # Set existing metadata to deleted if delete argument is True
        if delete:
            original_metadata = _backup_metadata_get(context, backup_id,
                                                     session)
            for meta_key, meta_value in original_metadata.items():
                if meta_key not in metadata:
                    meta_ref = _backup_metadata_get_item(context, backup_id,
                                                         meta_key, session)
                    meta_ref.update({'deleted': True,
                                     'deleted_at': timeutils.utcnow()})
                    meta_ref.save(session=session)

        meta_ref = None

        # Now update all existing items with new values, or create new meta
        # objects
        for meta_key, meta_value in metadata.items():

            # update the value whether it exists or not
            item = {"value": meta_value}

            try:
                meta_ref = _backup_metadata_get_item(context, backup_id,
                                                     meta_key, session)
            except exception.BackupMetadataNotFound:
                meta_ref = models.BackupMetadata()
                item.update({"key": meta_key, "backup_id": backup_id})

            meta_ref.update(item)
            meta_ref.save(session=session)

    return backup_metadata_get(context, backup_id)


###############################


@require_context
def _transfer_get(context, transfer_id, session=None):
    query = model_query(context, models.Transfer,
                        session=session).\
        filter_by(id=transfer_id)

    # Non-admins may only see transfers of volumes in their own project.
    if not is_admin_context(context):
        volume = models.Volume
        query = query.filter(models.Transfer.volume_id == volume.id,
                             volume.project_id == context.project_id)

    result = query.first()

    if not result:
        raise exception.TransferNotFound(transfer_id=transfer_id)

    return result


@require_context
def transfer_get(context, transfer_id):
    """Return a volume transfer row by id."""
    return _transfer_get(context, transfer_id)


def _process_transfer_filters(query, filters):
    if filters:
        project_id = filters.pop('project_id', None)
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Transfer, filters):
            return

        if project_id:
            # project_id lives on the volume, not the transfer row.
            volume = models.Volume
            query = query.filter(volume.id == models.Transfer.volume_id,
                                 volume.project_id == project_id)

        query = query.filter_by(**filters)
    return query


def _translate_transfers(transfers):
    # Project each transfer row onto the fixed set of API fields.
    fields = ('id', 'volume_id', 'display_name', 'created_at', 'deleted',
              'no_snapshots', 'source_project_id', 'destination_project_id',
              'accepted')
    return [{k: transfer[k] for k in fields} for transfer in transfers]


def _transfer_get_all(context, marker=None, limit=None, sort_keys=None,
                      sort_dirs=None, filters=None, offset=None):
    session = get_session()
    with session.begin():
        query = _generate_paginate_query(context, session, marker, limit,
                                         sort_keys, sort_dirs, filters,
                                         offset, models.Transfer)
        if query is None:
            return []
        return _translate_transfers(query.all())


@require_admin_context
def transfer_get_all(context, marker=None, limit=None, sort_keys=None,
                     sort_dirs=None, filters=None, offset=None):
    """Return all transfers matching the paginate/filter arguments."""
    return _transfer_get_all(context, marker=marker, limit=limit,
                             sort_keys=sort_keys, sort_dirs=sort_dirs,
                             filters=filters, offset=offset)


def _transfer_get_query(context, session=None, project_only=False):
    return model_query(context, models.Transfer, session=session,
                       project_only=project_only)


@require_context
def transfer_get_all_by_project(context, project_id, marker=None,
                                limit=None, sort_keys=None,
                                sort_dirs=None, filters=None, offset=None):
    """Return a project's transfers matching the paginate/filter args."""
    authorize_project_context(context, project_id)
filters = filters.copy() if filters else {} filters['project_id'] = project_id return _transfer_get_all(context, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) @require_context @handle_db_data_error def transfer_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) transfer_id = values['id'] volume_id = values['volume_id'] session = get_session() with session.begin(): expected = {'id': volume_id, 'status': 'available'} update = {'status': 'awaiting-transfer'} if not conditional_update(context, models.Volume, update, expected): msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s ' 'expected in available state.') % {'transfer_id': transfer_id, 'volume_id': volume_id}) LOG.error(msg) raise exception.InvalidVolume(reason=msg) transfer = models.Transfer() transfer.update(values) session.add(transfer) return transfer @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def transfer_destroy(context, transfer_id): utcnow = timeutils.utcnow() session = get_session() with session.begin(): volume_id = _transfer_get(context, transfer_id, session)['volume_id'] expected = {'id': volume_id, 'status': 'awaiting-transfer'} update = {'status': 'available'} if not conditional_update(context, models.Volume, update, expected): # but we can still mark the transfer record as deleted. 
msg = (_('Transfer %(transfer_id)s: Volume expected in ' 'awaiting-transfer state.') % {'transfer_id': transfer_id}) LOG.error(msg) updated_values = {'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')} (model_query(context, models.Transfer, session=session) .filter_by(id=transfer_id) .update(updated_values)) del updated_values['updated_at'] return updated_values def _roll_back_transferred_volume_and_snapshots(context, volume_id, old_user_id, old_project_id, transffered_snapshots): expected = {'id': volume_id, 'status': 'available'} update = {'status': 'awaiting-transfer', 'user_id': old_user_id, 'project_id': old_project_id, 'updated_at': timeutils.utcnow()} if not conditional_update(context, models.Volume, update, expected): LOG.warning('Volume: %(volume_id)s is not in the expected available ' 'status. Rolling it back.', {'volume_id': volume_id}) return for snapshot_id in transffered_snapshots: LOG.info('Beginning to roll back transferred snapshots: %s', snapshot_id) expected = {'id': snapshot_id, 'status': 'available'} update = {'user_id': old_user_id, 'project_id': old_project_id, 'updated_at': timeutils.utcnow()} if not conditional_update(context, models.Snapshot, update, expected): LOG.warning('Snapshot: %(snapshot_id)s is not in the expected ' 'available state. 
Rolling it back.', {'snapshot_id': snapshot_id}) return @require_context def transfer_accept(context, transfer_id, user_id, project_id, no_snapshots=False): session = get_session() with session.begin(): volume_id = _transfer_get(context, transfer_id, session)['volume_id'] expected = {'id': volume_id, 'status': 'awaiting-transfer'} update = {'status': 'available', 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow()} if not conditional_update(context, models.Volume, update, expected): msg = (_('Transfer %(transfer_id)s: Volume id %(volume_id)s ' 'expected in awaiting-transfer state.') % {'transfer_id': transfer_id, 'volume_id': volume_id}) LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Update snapshots for transfer snapshots with volume. if not no_snapshots: snapshots = snapshot_get_all_for_volume(context, volume_id) transferred_snapshots = [] for snapshot in snapshots: LOG.info('Begin to transfer snapshot: %s', snapshot['id']) old_user_id = snapshot['user_id'] old_project_id = snapshot['project_id'] expected = {'id': snapshot['id'], 'status': 'available'} update = {'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow()} if not conditional_update(context, models.Snapshot, update, expected): msg = (_('Transfer %(transfer_id)s: Snapshot ' '%(snapshot_id)s is not in the expected ' 'available state.') % {'transfer_id': transfer_id, 'snapshot_id': snapshot['id']}) LOG.error(msg) _roll_back_transferred_volume_and_snapshots( context, volume_id, old_user_id, old_project_id, transferred_snapshots) raise exception.InvalidSnapshot(reason=msg) transferred_snapshots.append(snapshot['id']) (session.query(models.Transfer) .filter_by(id=transfer_id) .update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at'), 'destination_project_id': project_id, 'accepted': True})) ############################### @require_admin_context def _consistencygroup_data_get_for_project(context, 
project_id, session=None): query = model_query(context, func.count(models.ConsistencyGroup.id), read_deleted="no", session=session).\ filter_by(project_id=project_id) result = query.first() return (0, result[0] or 0) @require_context def _consistencygroup_get(context, consistencygroup_id, session=None): result = model_query(context, models.ConsistencyGroup, session=session, project_only=True).\ filter_by(id=consistencygroup_id).\ first() if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id) return result @require_context def consistencygroup_get(context, consistencygroup_id): return _consistencygroup_get(context, consistencygroup_id) def _consistencygroups_get_query(context, session=None, project_only=False): return model_query(context, models.ConsistencyGroup, session=session, project_only=project_only) def _process_consistencygroups_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.ConsistencyGroup, filters): return query = query.filter_by(**filters) return query def _consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.ConsistencyGroup, filters): return [] session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.ConsistencyGroup) if query is None: return [] return query.all() @require_admin_context def consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return _consistencygroup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def consistencygroup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): authorize_project_context(context, project_id) if not filters: 
filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _consistencygroup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @handle_db_data_error @require_context def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None): cg_model = models.ConsistencyGroup values = values.copy() if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): if cg_snap_id: conditions = [cg_model.id == models.CGSnapshot.consistencygroup_id, models.CGSnapshot.id == cg_snap_id] elif cg_id: conditions = [cg_model.id == cg_id] else: conditions = None if conditions: names = ['volume_type_id', 'availability_zone', 'host', 'cluster_name'] for name in names: values.pop(name, None) fields = [getattr(cg_model, name) for name in names] fields.extend(bindparam(k, v) for k, v in values.items()) sel = session.query(*fields).filter(*conditions) names.extend(values.keys()) insert_stmt = cg_model.__table__.insert().from_select(names, sel) result = session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise if not result.rowcount: if cg_id: raise exception.ConsistencyGroupNotFound( consistencygroup_id=cg_id) raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_snap_id) else: consistencygroup = cg_model() consistencygroup.update(values) session.add(consistencygroup) return _consistencygroup_get(context, values['id'], session=session) @handle_db_data_error @require_context def consistencygroup_update(context, consistencygroup_id, values): query = model_query(context, models.ConsistencyGroup, project_only=True) result = query.filter_by(id=consistencygroup_id).update(values) if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id) @require_admin_context def consistencygroup_destroy(context, consistencygroup_id): utcnow = timeutils.utcnow() session = get_session() with session.begin(): updated_values = {'status': 
fields.ConsistencyGroupStatus.DELETED, 'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')} model_query(context, models.ConsistencyGroup, session=session).\ filter_by(id=consistencygroup_id).\ update({'status': fields.ConsistencyGroupStatus.DELETED, 'deleted': True, 'deleted_at': utcnow, 'updated_at': literal_column('updated_at')}) del updated_values['updated_at'] return updated_values @require_admin_context def cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids, volume_ids, snapshot_ids, session): utcnow = timeutils.utcnow() if snapshot_ids: snaps = (model_query(context, models.Snapshot, session=session, read_deleted="no"). filter(models.Snapshot.id.in_(snapshot_ids)). all()) for snap in snaps: snap.update({'cgsnapshot_id': None, 'updated_at': utcnow}) if cgsnapshot_ids: cg_snaps = (model_query(context, models.CGSnapshot, session=session, read_deleted="no"). filter(models.CGSnapshot.id.in_(cgsnapshot_ids)). all()) for cg_snap in cg_snaps: cg_snap.delete(session=session) if volume_ids: vols = (model_query(context, models.Volume, session=session, read_deleted="no"). filter(models.Volume.id.in_(volume_ids)). all()) for vol in vols: vol.update({'consistencygroup_id': None, 'updated_at': utcnow}) if cg_ids: cgs = (model_query(context, models.ConsistencyGroup, session=session, read_deleted="no"). filter(models.ConsistencyGroup.id.in_(cg_ids)). 
all()) for cg in cgs: cg.delete(session=session) def cg_has_cgsnapshot_filter(): return sql.exists().where(and_( models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id, ~models.CGSnapshot.deleted)) def cg_has_volumes_filter(attached_or_with_snapshots=False): query = sql.exists().where( and_(models.Volume.consistencygroup_id == models.ConsistencyGroup.id, ~models.Volume.deleted)) if attached_or_with_snapshots: query = query.where(or_( models.Volume.attach_status == 'attached', sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted)))) return query def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): subq = sql.select([models.ConsistencyGroup]).where( and_(~models.ConsistencyGroup.deleted, models.ConsistencyGroup.status == 'creating')).alias('cg2') if cg_id: match_id = subq.c.source_cgid == cg_id elif cgsnapshot_id: match_id = subq.c.cgsnapshot_id == cgsnapshot_id else: msg = _('cg_creating_from_src must be called with cg_id or ' 'cgsnapshot_id parameter.') raise exception.ProgrammingError(reason=msg) return sql.exists([subq]).where(match_id) @require_admin_context def consistencygroup_include_in_cluster(context, cluster, partial_rename=True, **filters): return _include_in_cluster(context, cluster, models.ConsistencyGroup, partial_rename, filters) @require_admin_context def group_include_in_cluster(context, cluster, partial_rename=True, **filters): return _include_in_cluster(context, cluster, models.Group, partial_rename, filters) : result = (model_query(context, models.Group, session=session, project_only=True). filter_by(id=group_id). 
first()) if not result: raise exception.GroupNotFound(group_id=group_id) return result @require_context def group_get(context, group_id): return _group_get(context, group_id) def _groups_get_query(context, session=None, project_only=False): return model_query(context, models.Group, session=session, project_only=project_only) def _group_snapshot_get_query(context, session=None, project_only=False): return model_query(context, models.GroupSnapshot, session=session, project_only=project_only) @apply_like_filters(model=models.Group) def _process_groups_filters(query, filters): if filters: backend_match_level = filters.pop('backend_match_level', 'backend') host = filters.pop('host', None) if host: query = query.filter(_filter_host(models.Group.host, host, match_level=backend_match_level)) if not is_valid_model_filters(models.Group, filters): return query = query.filter_by(**filters) return query @apply_like_filters(model=models.GroupSnapshot) def _process_group_snapshot_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.GroupSnapshot, filters): return query = query.filter_by(**filters) return query def _group_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, models.Group) return query.all() if query else [] @require_admin_context def group_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return _group_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def group_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = 
project_id return _group_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @handle_db_data_error @require_context def group_create(context, values, group_snapshot_id=None, source_group_id=None): group_model = models.Group values = values.copy() if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) session = get_session() with session.begin(): if group_snapshot_id: conditions = [group_model.id == models.GroupSnapshot.group_id, models.GroupSnapshot.id == group_snapshot_id] elif source_group_id: conditions = [group_model.id == source_group_id] else: conditions = None if conditions: values.pop('group_type_id', None) values.pop('availability_zone', None) values.pop('host', None) # NOTE(xyang): Save volume_type_ids to update later. volume_type_ids = values.pop('volume_type_ids', []) sel = session.query(group_model.group_type_id, group_model.availability_zone, group_model.host, *(bindparam(k, v) for k, v in values.items()) ).filter(*conditions) names = ['group_type_id', 'availability_zone', 'host'] names.extend(values.keys()) insert_stmt = group_model.__table__.insert().from_select( names, sel) result = session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise if not result.rowcount: if source_group_id: raise exception.GroupNotFound( group_id=source_group_id) raise exception.GroupSnapshotNotFound( group_snapshot_id=group_snapshot_id) for item in volume_type_ids: mapping = models.GroupVolumeTypeMapping() mapping['volume_type_id'] = item mapping['group_id'] = values['id'] session.add(mapping) else: for item in values.get('volume_type_ids') or []: mapping = models.GroupVolumeTypeMapping() mapping['volume_type_id'] = item mapping['group_id'] = values['id'] session.add(mapping) group = group_model() group.update(values) session.add(group) return _group_get(context, values['id'], session=session) @handle_db_data_error @require_context def group_volume_type_mapping_create(context, group_id, volume_type_id): 
_group_get(context, group_id) _volume_type_get_id_from_volume_type(context, volume_type_id) existing = _group_volume_type_mapping_get_all_by_group_volume_type( context, group_id, volume_type_id) if existing: raise exception.GroupVolumeTypeMappingExists( group_id=group_id, volume_type_id=volume_type_id) mapping = models.GroupVolumeTypeMapping() mapping.update({"group_id": group_id, "volume_type_id": volume_type_id}) session = get_session() with session.begin(): try: mapping.save(session=session) except db_exc.DBDuplicateEntry: raise exception.GroupVolumeTypeMappingExists( group_id=group_id, volume_type_id=volume_type_id) return mapping @handle_db_data_error @require_context def group_update(context, group_id, values): query = model_query(context, models.Group, project_only=True) result = query.filter_by(id=group_id).update(values) if not result: raise exception.GroupNotFound(group_id=group_id) @require_admin_context def group_destroy(context, group_id): session = get_session() with session.begin(): (model_query(context, models.Group, session=session). filter_by(id=group_id). update({'status': fields.GroupStatus.DELETED, 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})) (session.query(models.GroupVolumeTypeMapping). filter_by(group_id=group_id). 
update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})) def group_has_group_snapshot_filter(): return sql.exists().where(and_( models.GroupSnapshot.group_id == models.Group.id, ~models.GroupSnapshot.deleted)) def group_has_volumes_filter(attached_or_with_snapshots=False): query = sql.exists().where( and_(models.Volume.group_id == models.Group.id, ~models.Volume.deleted)) if attached_or_with_snapshots: query = query.where(or_( models.Volume.attach_status == 'attached', sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted)))) return query def group_creating_from_src(group_id=None, group_snapshot_id=None): subq = sql.select([models.Group]).where( and_(~models.Group.deleted, models.Group.status == 'creating')).alias('group2') if group_id: match_id = subq.c.source_group_id == group_id elif group_snapshot_id: match_id = subq.c.group_snapshot_id == group_snapshot_id else: msg = _('group_creating_from_src must be called with group_id or ' 'group_snapshot_id parameter.') raise exception.ProgrammingError(reason=msg) return sql.exists([subq]).where(match_id) shot_id) def is_valid_model_filters(model, filters, exclude_list=None): for key in filters.keys(): if exclude_list and key in exclude_list: continue if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("Metadata filter value is not valid dictionary") return False continue try: key = key.rstrip('~') getattr(model, key) except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return False return True def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None): query = model_query(context, models.CGSnapshot) if filters: if not is_valid_model_filters(models.CGSnapshot, filters): return [] query = query.filter_by(**filters) if project_id: query = query.filter_by(project_id=project_id) if group_id: query = query.filter_by(consistencygroup_id=group_id) return query.all() 
@require_admin_context def cgsnapshot_get_all(context, filters=None): return _cgsnapshot_get_all(context, filters=filters) @require_admin_context def cgsnapshot_get_all_by_group(context, group_id, filters=None): return _cgsnapshot_get_all(context, group_id=group_id, filters=filters) @require_context def cgsnapshot_get_all_by_project(context, project_id, filters=None): authorize_project_context(context, project_id) return _cgsnapshot_get_all(context, project_id=project_id, filters=filters) @handle_db_data_error @require_context def cgsnapshot_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) cg_id = values.get('consistencygroup_id') session = get_session() model = models.CGSnapshot with session.begin(): if cg_id: conditions = [ sql.exists().where(and_( ~models.Volume.deleted, models.Volume.consistencygroup_id == cg_id)), ~models.ConsistencyGroup.deleted, models.ConsistencyGroup.id == cg_id, ~models.ConsistencyGroup.status.in_(('creating', 'updating'))] binds = (bindparam(k, v) for k, v in values.items()) sel = session.query(*binds).filter(*conditions) insert_stmt = model.__table__.insert().from_select(values.keys(), sel) result = session.execute(insert_stmt) # the right exception if not result.rowcount: msg = _("Source CG cannot be empty or in 'creating' or " "'updating' state. 
No cgsnapshot will be created.") raise exception.InvalidConsistencyGroup(reason=msg) else: cgsnapshot = model() cgsnapshot.update(values) session.add(cgsnapshot) return _cgsnapshot_get(context, values['id'], session=session) @require_context @handle_db_data_error def cgsnapshot_update(context, cgsnapshot_id, values): query = model_query(context, models.CGSnapshot, project_only=True) result = query.filter_by(id=cgsnapshot_id).update(values) if not result: raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) @require_admin_context def cgsnapshot_destroy(context, cgsnapshot_id): session = get_session() with session.begin(): updated_values = {'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')} model_query(context, models.CGSnapshot, session=session).\ filter_by(id=cgsnapshot_id).\ update(updated_values) del updated_values['updated_at'] return updated_values def cgsnapshot_creating_from_src(): return sql.exists().where(and_( models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id, ~models.CGSnapshot.deleted, models.CGSnapshot.status == 'creating')) ############################### @require_context def _group_snapshot_get(context, group_snapshot_id, session=None): result = model_query(context, models.GroupSnapshot, session=session, project_only=True).\ filter_by(id=group_snapshot_id).\ first() if not result: raise exception.GroupSnapshotNotFound( group_snapshot_id=group_snapshot_id) return result @require_context def group_snapshot_get(context, group_snapshot_id): return _group_snapshot_get(context, group_snapshot_id) def _group_snapshot_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters and not is_valid_model_filters(models.GroupSnapshot, filters): return [] session = get_session() with session.begin(): # Generate the paginate query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, 
filters, offset, models.GroupSnapshot) return query.all() if query else [] @require_admin_context def group_snapshot_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return _group_snapshot_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_admin_context def group_snapshot_get_all_by_group(context, group_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): if filters is None: filters = {} if group_id: filters['group_id'] = group_id return _group_snapshot_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @require_context def group_snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): authorize_project_context(context, project_id) if filters is None: filters = {} if project_id: filters['project_id'] = project_id return _group_snapshot_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) @handle_db_data_error @require_context def group_snapshot_create(context, values): if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) group_id = values.get('group_id') session = get_session() model = models.GroupSnapshot with session.begin(): if group_id: # There has to exist at least 1 volume in the group and the group # cannot be updating the composing volumes or being created. conditions = [ sql.exists().where(and_( ~models.Volume.deleted, models.Volume.group_id == group_id)), ~models.Group.deleted, models.Group.id == group_id, ~models.Group.status.in_(('creating', 'updating'))] # NOTE(geguileo): We build a "fake" from_select clause instead of # using transaction isolation on the session because we would need # SERIALIZABLE level and that would have a considerable performance # penalty. 
binds = (bindparam(k, v) for k, v in values.items()) sel = session.query(*binds).filter(*conditions) insert_stmt = model.__table__.insert().from_select(values.keys(), sel) result = session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise if not result.rowcount: msg = _("Source group cannot be empty or in 'creating' or " "'updating' state. No group snapshot will be created.") raise exception.InvalidGroup(reason=msg) else: group_snapshot = model() group_snapshot.update(values) session.add(group_snapshot) return _group_snapshot_get(context, values['id'], session=session) @require_context @handle_db_data_error def group_snapshot_update(context, group_snapshot_id, values): session = get_session() with session.begin(): result = model_query(context, models.GroupSnapshot, project_only=True).\ filter_by(id=group_snapshot_id).\ first() if not result: raise exception.GroupSnapshotNotFound( _("No group snapshot with id %s") % group_snapshot_id) result.update(values) result.save(session=session) return result @require_admin_context def group_snapshot_destroy(context, group_snapshot_id): session = get_session() with session.begin(): updated_values = {'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')} model_query(context, models.GroupSnapshot, session=session).\ filter_by(id=group_snapshot_id).\ update(updated_values) del updated_values['updated_at'] return updated_values def group_snapshot_creating_from_src(): return sql.exists().where(and_( models.GroupSnapshot.group_id == models.Group.id, ~models.GroupSnapshot.deleted, models.GroupSnapshot.status == 'creating')) f 'deleted' not in table.columns.keys(): continue LOG.info('Purging deleted rows older than age=%(age)d days ' 'from table=%(table)s', {'age': age_in_days, 'table': table}) deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) try: with session.begin(): if six.text_type(table) == "quality_of_service_specs": 
session.query(models.QualityOfServiceSpecs).filter( and_(models.QualityOfServiceSpecs.specs_id.isnot( None), models.QualityOfServiceSpecs. deleted.is_(True), models.QualityOfServiceSpecs. deleted_at < deleted_age)).delete() result = session.execute( table.delete() .where(table.c.deleted_at < deleted_age)) except db_exc.DBReferenceError as ex: LOG.error('DBError detected when purging from ' '%(tablename)s: %(error)s.', {'tablename': table, 'error': ex}) raise rows_purged = result.rowcount if rows_purged != 0: LOG.info("Deleted %(row)d rows from table=%(table)s", {'row': rows_purged, 'table': table}) ailable( 'Service for host %(host)s must first be frozen.' % {'host': backend_host}) actions = { 'disabled': False, 'disabled_reason': '', 'active_backend_id': None, 'replication_status': 'enabled', } expectations = { 'frozen': True, 'disabled': True, } if service.is_clustered: service.cluster.conditional_update(actions, expectations) service.cluster.reset_service_replication() else: service.conditional_update(actions, expectations) 'action_id': message['action_id'], 'message_level': message['message_level'], 'created_at': message['created_at'], 'expires_at': message.get('expires_at'), } def _message_get(context, message_id, session=None): query = model_query(context, models.Message, read_deleted="no", project_only="yes", session=session) result = query.filter_by(id=message_id).first() if not result: raise exception.MessageNotFound(message_id=message_id) return result @require_context def message_get(context, message_id, session=None): result = _message_get(context, message_id, session) return _translate_message(result) @require_context def message_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): messages = models.Message session = get_session() with session.begin(): query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset, messages) if query is None: return [] results = 
query.all() return _translate_messages(results) @apply_like_filters(model=models.Message) def _process_messages_filters(query, filters): if filters: if not is_valid_model_filters(models.Message, filters): return None query = query.filter_by(**filters) return query def _messages_get_query(context, session=None, project_only=False): return model_query(context, models.Message, session=session, project_only=project_only) @require_context def message_create(context, values): message_ref = models.Message() if not values.get('id'): values['id'] = str(uuid.uuid4()) message_ref.update(values) session = get_session() with session.begin(): session.add(message_ref) @require_admin_context def message_destroy(context, message): session = get_session() now = timeutils.utcnow() with session.begin(): updated_values = {'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')} (model_query(context, models.Message, session=session). filter_by(id=message.get('id')). update(updated_values)) del updated_values['updated_at'] return updated_values @require_admin_context def cleanup_expired_messages(context): session = get_session() now = timeutils.utcnow() with session.begin(): # NOTE(tommylikehu): Directly delete the expired # messages here. 
return session.query(models.Message).filter( models.Message.expires_at < now).delete() ############################### @require_context def driver_initiator_data_insert_by_key(context, initiator, namespace, key, value): data = models.DriverInitiatorData() data.initiator = initiator data.namespace = namespace data.key = key data.value = value session = get_session() try: with session.begin(): session.add(data) return True except db_exc.DBDuplicateEntry: return False @require_context def driver_initiator_data_get(context, initiator, namespace): session = get_session() with session.begin(): return session.query(models.DriverInitiatorData).\ filter_by(initiator=initiator).\ filter_by(namespace=namespace).\ all() ############################### PAGINATION_HELPERS = { models.Volume: (_volume_get_query, _process_volume_filters, _volume_get), models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get), models.Backup: (_backups_get_query, _process_backups_filters, _backup_get), models.QualityOfServiceSpecs: (_qos_specs_get_query, _process_qos_specs_filters, _qos_specs_get), models.VolumeType: (_volume_type_get_query, _process_volume_types_filters, _volume_type_get_db_object), models.ConsistencyGroup: (_consistencygroups_get_query, _process_consistencygroups_filters, _consistencygroup_get), models.Message: (_messages_get_query, _process_messages_filters, _message_get), models.GroupType: (_group_type_get_query, _process_group_types_filters, _group_type_get_db_object), models.Group: (_groups_get_query, _process_groups_filters, _group_get), models.GroupSnapshot: (_group_snapshot_get_query, _process_group_snapshot_filters, _group_snapshot_get), models.VolumeAttachment: (_attachment_get_query, _process_attachment_filters, _attachment_get), models.Transfer: (_transfer_get_query, _process_transfer_filters, _transfer_get), } CALCULATE_COUNT_HELPERS = { 'volume': (_volume_get_query, _process_volume_filters), 'snapshot': (_snaps_get_query, _process_snaps_filters), 
'backup': (_backups_get_query, _process_backups_filters), } ############################### @require_context def image_volume_cache_create(context, host, cluster_name, image_id, image_updated_at, volume_id, size): session = get_session() with session.begin(): cache_entry = models.ImageVolumeCacheEntry() cache_entry.host = host cache_entry.cluster_name = cluster_name cache_entry.image_id = image_id cache_entry.image_updated_at = image_updated_at cache_entry.volume_id = volume_id cache_entry.size = size session.add(cache_entry) return cache_entry @require_context def image_volume_cache_delete(context, volume_id): session = get_session() with session.begin(): session.query(models.ImageVolumeCacheEntry).\ filter_by(volume_id=volume_id).\ delete() @require_context def image_volume_cache_get_and_update_last_used(context, image_id, **filters): filters = _clean_filters(filters) session = get_session() with session.begin(): entry = session.query(models.ImageVolumeCacheEntry).\ filter_by(image_id=image_id).\ filter_by(**filters).\ order_by(desc(models.ImageVolumeCacheEntry.last_used)).\ first() if entry: entry.last_used = timeutils.utcnow() entry.save(session=session) return entry @require_context def image_volume_cache_get_by_volume_id(context, volume_id): session = get_session() with session.begin(): return session.query(models.ImageVolumeCacheEntry).\ filter_by(volume_id=volume_id).\ first() @require_context def image_volume_cache_get_all(context, **filters): filters = _clean_filters(filters) session = get_session() with session.begin(): return session.query(models.ImageVolumeCacheEntry).\ filter_by(**filters).\ order_by(desc(models.ImageVolumeCacheEntry.last_used)).\ all() @require_admin_context def image_volume_cache_include_in_cluster(context, cluster, partial_rename=True, **filters): filters = _clean_filters(filters) return _include_in_cluster(context, cluster, models.ImageVolumeCacheEntry, partial_rename, filters) ################### def _worker_query(context, 
session=None, until=None, db_filters=None, ignore_sentinel=True, **filters): # Remove all filters based on the workers table that are set to None filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Worker, filters): return None query = model_query(context, models.Worker, session=session) # TODO(geguileo): Once we remove support for MySQL 5.5 we can remove this if ignore_sentinel: # We don't want to retrieve the workers sentinel query = query.filter(models.Worker.resource_type != 'SENTINEL') if until: db_filters = list(db_filters) if db_filters else [] # created_at field. db_filters.append(models.Worker.updated_at <= until) if db_filters: query = query.filter(and_(*db_filters)) if filters: query = query.filter_by(**filters) return query DB_SUPPORTS_SUBSECOND_RESOLUTION = True def workers_init(): global DB_SUPPORTS_SUBSECOND_RESOLUTION session = get_session() query = session.query(models.Worker).filter_by(resource_type='SENTINEL') worker = query.first() DB_SUPPORTS_SUBSECOND_RESOLUTION = bool(worker.updated_at.microsecond) def _worker_set_updated_at_field(values): # TODO(geguileo): Once we drop support for MySQL 5.5 we can simplify this # method. 
updated_at = values.get('updated_at', timeutils.utcnow()) if isinstance(updated_at, six.string_types): return if not DB_SUPPORTS_SUBSECOND_RESOLUTION: updated_at = updated_at.replace(microsecond=0) values['updated_at'] = updated_at @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def worker_create(context, **values): _worker_set_updated_at_field(values) worker = models.Worker(**values) session = get_session() try: with session.begin(): worker.save(session) except db_exc.DBDuplicateEntry: raise exception.WorkerExists(type=values.get('resource_type'), id=values.get('resource_id')) return worker def worker_get(context, **filters): query = _worker_query(context, **filters) worker = query.first() if query else None if not worker: raise exception.WorkerNotFound(**filters) return worker def worker_get_all(context, **filters): query = _worker_query(context, **filters) return query.all() if query else [] def _orm_worker_update(worker, values): if not worker: return for key, value in values.items(): setattr(worker, key, value) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def worker_update(context, id, filters=None, orm_worker=None, **values): filters = filters or {} query = _worker_query(context, id=id, **filters) # If we want to update the orm_worker and we don't set the update_at field _worker_set_updated_at_field(values) reference = orm_worker or models.Worker values['race_preventer'] = reference.race_preventer + 1 result = query.update(values) if not result: raise exception.WorkerNotFound(id=id, **filters) _orm_worker_update(orm_worker, values) return result @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def worker_claim_for_cleanup(context, claimer_id, orm_worker): values = {'service_id': claimer_id, 'race_preventer': orm_worker.race_preventer + 1, 'updated_at': timeutils.utcnow()} _worker_set_updated_at_field(values) # or thread query = _worker_query(context, status=orm_worker.status, 
service_id=orm_worker.service_id, race_preventer=orm_worker.race_preventer, until=orm_worker.updated_at, id=orm_worker.id) result = query.update(values, synchronize_session=False) if result: _orm_worker_update(orm_worker, values) return result def worker_destroy(context, **filters): query = _worker_query(context, **filters) return query.delete() ############################### @require_context def resource_exists(context, model, resource_id, session=None): conditions = [model.id == resource_id] # Match non deleted resources by the id if 'no' == context.read_deleted: conditions.append(~model.deleted) # If the context is not admin we limit it to the context's project if is_user_context(context) and hasattr(model, 'project_id'): conditions.append(model.project_id == context.project_id) session = session or get_session() query = session.query(sql.exists().where(and_(*conditions))) return query.scalar() def get_model_for_versioned_object(versioned_object): if isinstance(versioned_object, six.string_types): model_name = versioned_object else: model_name = versioned_object.obj_name() if model_name == 'BackupImport': return models.Backup return getattr(models, model_name) def _get_get_method(model): GET_EXCEPTIONS = { models.ConsistencyGroup: consistencygroup_get, models.VolumeType: _volume_type_get_full, models.QualityOfServiceSpecs: qos_specs_get, models.GroupType: _group_type_get_full, models.CGSnapshot: cgsnapshot_get, } if model in GET_EXCEPTIONS: return GET_EXCEPTIONS[model] s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__) method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get' return globals().get(method_name) _GET_METHODS = {} @require_context def get_by_id(context, model, id, *args, **kwargs): if not _GET_METHODS.get(model): _GET_METHODS[model] = _get_get_method(model) return _GET_METHODS[model](context, id, *args, **kwargs) def condition_db_filter(model, field, value): orm_field = getattr(model, field) # For values that must match and are 
iterables we use IN if (isinstance(value, abc.Iterable) and not isinstance(value, six.string_types)): # We cannot use in_ when one of the values is None if None not in value: return orm_field.in_(value) return or_(orm_field == v for v in value) # For values that must match and are not iterables we use == return orm_field == value def condition_not_db_filter(model, field, value, auto_none=True): result = ~condition_db_filter(model, field, value) if (auto_none and ((isinstance(value, abc.Iterable) and not isinstance(value, six.string_types) and None not in value) or (value is not None))): orm_field = getattr(model, field) result = or_(result, orm_field.is_(None)) return result def is_orm_value(obj): return isinstance(obj, (sqlalchemy.orm.attributes.InstrumentedAttribute, sqlalchemy.sql.expression.ColumnElement)) def _check_is_not_multitable(values, model): used_models = set() for field in values: if isinstance(field, sqlalchemy.orm.attributes.InstrumentedAttribute): used_models.add(field.class_) elif isinstance(field, six.string_types): used_models.add(model) else: raise exception.ProgrammingError( reason='DB Conditional update - Unknown field type, must be ' 'string or ORM field.') if len(used_models) > 1: raise exception.ProgrammingError( reason='DB Conditional update - Error in query, multitable ' 'updates are not supported.') @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def conditional_update(context, model, values, expected_values, filters=(), include_deleted='no', project_only=False, order=None): _check_is_not_multitable(values, model) # Provided filters will become part of the where clause where_conds = list(filters) # Build where conditions with operators ==, !=, NOT IN and IN for field, condition in expected_values.items(): if not isinstance(condition, db.Condition): condition = db.Condition(condition, field) where_conds.append(condition.get_filter(model, field)) # Create the query with the where clause query = 
model_query(context, model, read_deleted=include_deleted, project_only=project_only).filter(*where_conds) # NOTE(geguileo): Some DBs' update method are order dependent, and they order = list(order) if order else tuple() orm_field_list = [] case_list = [] unordered_list = [] for key, value in values.items(): if isinstance(value, db.Case): value = case(value.whens, value.value, value.else_) if key in order: order[order.index(key)] = (key, value) continue if isinstance(value, sql.elements.Case): value_list = case_list elif is_orm_value(value): value_list = orm_field_list else: value_list = unordered_list value_list.append((key, value)) update_args = {'synchronize_session': False} # If we don't have to enforce any kind of order just pass along the values if order or orm_field_list or case_list: values = itertools.chain(order, orm_field_list, case_list, unordered_list) update_args['update_args'] = {'preserve_parameter_order': True} result = query.update(values, **update_args) return 0 != result
true
true
1c355fb927a239cc73cf7a4f7868e0075f4fe900
3,445
py
Python
baselines/DT.py
skully-coder/COVID-19_TLSTM
7ae6b3e1d8edbcb593b1c5f70001e075b7d29c0f
[ "MIT" ]
null
null
null
baselines/DT.py
skully-coder/COVID-19_TLSTM
7ae6b3e1d8edbcb593b1c5f70001e075b7d29c0f
[ "MIT" ]
null
null
null
baselines/DT.py
skully-coder/COVID-19_TLSTM
7ae6b3e1d8edbcb593b1c5f70001e075b7d29c0f
[ "MIT" ]
null
null
null
from math import log import operator import pickle import numpy as np def load_pkl(path): with open(path, 'rb') as f: obj = pickle.load(f) return obj def calcShannonEnt(dataSet): numEntries = len(dataSet) labelCounts = {} for featVec in dataSet: currentLabel = featVec[-1] print(currentLabel) if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0 labelCounts[currentLabel] += 1 shannonEnt = 0 for key in labelCounts: prob = float(labelCounts[key])/numEntries shannonEnt -= prob*log(prob, 2) return shannonEnt def splitDataSet(dataSet, axis, value): retDataSet = [] for featVec in dataSet: if featVec[axis] == value: reducedFeatVec = featVec[:axis] reducedFeatVec.extend(featVec[axis+1:]) retDataSet.append(reducedFeatVec) return retDataSet def chooseBestFeatureToSplit(dataSet): numFeatures = len(dataSet[0])-1 baseEntropy = calcShannonEnt(dataSet) bestInfoGain = 0 bestFeature = -1 for i in range(numFeatures): featList = [example[i] for example in dataSet] uniqueVals = set(featList) newEntropy = 0 for value in uniqueVals: subDataSet = splitDataSet(dataSet, i, value) prob = len(subDataSet)/float(len(dataSet)) newEntropy += prob*calcShannonEnt(subDataSet) infoGain = baseEntropy - newEntropy if (infoGain > bestInfoGain): bestInfoGain = infoGain bestFeature = i return bestFeature def majorityCnt(classList): classCount = {} for vote in classList: if vote not in classCount.keys(): classCount[vote] = 0 classCount[vote] += 1 sortedClassCount = sorted( classCount.items(), key=operator.itemgetter(1), reverse=True) return sortedClassCount[0][0] def createTree(dataSet, labels): classList = [example[-1] for example in dataSet] if classList.count(classList[0]) == len(classList): return classList[0] if len(dataSet[0]) == 1: return majorityCnt(classList) bestFeat = chooseBestFeatureToSplit(dataSet) bestFeatLabel = labels[bestFeat] myTree = {bestFeatLabel: {}} del(labels[bestFeat]) featValues = [example[bestFeat] for example in dataSet] uniqueVals = set(featValues) for value 
in uniqueVals: subLabels = labels[:] myTree[bestFeatLabel][value] = createTree(splitDataSet (dataSet, bestFeat, value), subLabels) return myTree if __name__ == "__main__": path = 'BatchData' # train data path_string = path + '/TrainData.seqs' data_train_batches = load_pkl(path_string) path_string = path + '/TrainLabel.seqs' labels_train_batches = load_pkl(path_string) number_train_batches = len(data_train_batches) input_dim = np.array(data_train_batches[0]).shape[2] output_dim = np.array(labels_train_batches[0]).shape[1] print("Train data is loaded!") print(data_train_batches) path_string = path + '/TestData.seqs' data_test_batches = load_pkl(path_string) path_string = path + '/TestLabel.seqs' labels_test_batches = load_pkl(path_string) number_test_batches = len(data_test_batches) print("Test data is loaded!") print(createTree(data_train_batches, labels_train_batches))
29.956522
88
0.65283
from math import log import operator import pickle import numpy as np def load_pkl(path): with open(path, 'rb') as f: obj = pickle.load(f) return obj def calcShannonEnt(dataSet): numEntries = len(dataSet) labelCounts = {} for featVec in dataSet: currentLabel = featVec[-1] print(currentLabel) if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0 labelCounts[currentLabel] += 1 shannonEnt = 0 for key in labelCounts: prob = float(labelCounts[key])/numEntries shannonEnt -= prob*log(prob, 2) return shannonEnt def splitDataSet(dataSet, axis, value): retDataSet = [] for featVec in dataSet: if featVec[axis] == value: reducedFeatVec = featVec[:axis] reducedFeatVec.extend(featVec[axis+1:]) retDataSet.append(reducedFeatVec) return retDataSet def chooseBestFeatureToSplit(dataSet): numFeatures = len(dataSet[0])-1 baseEntropy = calcShannonEnt(dataSet) bestInfoGain = 0 bestFeature = -1 for i in range(numFeatures): featList = [example[i] for example in dataSet] uniqueVals = set(featList) newEntropy = 0 for value in uniqueVals: subDataSet = splitDataSet(dataSet, i, value) prob = len(subDataSet)/float(len(dataSet)) newEntropy += prob*calcShannonEnt(subDataSet) infoGain = baseEntropy - newEntropy if (infoGain > bestInfoGain): bestInfoGain = infoGain bestFeature = i return bestFeature def majorityCnt(classList): classCount = {} for vote in classList: if vote not in classCount.keys(): classCount[vote] = 0 classCount[vote] += 1 sortedClassCount = sorted( classCount.items(), key=operator.itemgetter(1), reverse=True) return sortedClassCount[0][0] def createTree(dataSet, labels): classList = [example[-1] for example in dataSet] if classList.count(classList[0]) == len(classList): return classList[0] if len(dataSet[0]) == 1: return majorityCnt(classList) bestFeat = chooseBestFeatureToSplit(dataSet) bestFeatLabel = labels[bestFeat] myTree = {bestFeatLabel: {}} del(labels[bestFeat]) featValues = [example[bestFeat] for example in dataSet] uniqueVals = set(featValues) for value 
in uniqueVals: subLabels = labels[:] myTree[bestFeatLabel][value] = createTree(splitDataSet (dataSet, bestFeat, value), subLabels) return myTree if __name__ == "__main__": path = 'BatchData' path_string = path + '/TrainData.seqs' data_train_batches = load_pkl(path_string) path_string = path + '/TrainLabel.seqs' labels_train_batches = load_pkl(path_string) number_train_batches = len(data_train_batches) input_dim = np.array(data_train_batches[0]).shape[2] output_dim = np.array(labels_train_batches[0]).shape[1] print("Train data is loaded!") print(data_train_batches) path_string = path + '/TestData.seqs' data_test_batches = load_pkl(path_string) path_string = path + '/TestLabel.seqs' labels_test_batches = load_pkl(path_string) number_test_batches = len(data_test_batches) print("Test data is loaded!") print(createTree(data_train_batches, labels_train_batches))
true
true
1c35605295ad86b98f91e271e63a15929fccf02a
146
py
Python
python/1060.py
josevictorp81/Uri-questions-solutions
99e9bacb088ce59f58fb05531bb02f2fcffe26eb
[ "MIT" ]
3
2019-09-24T00:15:47.000Z
2019-09-25T02:52:27.000Z
python/1060.py
josevictorp81/Uri-questions-solutions
99e9bacb088ce59f58fb05531bb02f2fcffe26eb
[ "MIT" ]
1
2021-03-16T00:16:11.000Z
2021-03-16T00:16:11.000Z
python/1060.py
josevictorp81/Uri-questions-solutions
99e9bacb088ce59f58fb05531bb02f2fcffe26eb
[ "MIT" ]
4
2019-10-19T12:56:03.000Z
2019-10-23T23:24:06.000Z
positivos = 0 for i in range(6): num = float(input()) if num > 0: positivos += 1 print('{} valores positivos'.format(positivos))
18.25
47
0.59589
positivos = 0 for i in range(6): num = float(input()) if num > 0: positivos += 1 print('{} valores positivos'.format(positivos))
true
true
1c356055f04e858ea20a24787b214e11e6a0e77c
2,668
py
Python
src/you_get/extractors/toutiao.py
adger-me/you-get
b32741ad8e57b3a02a1006d27e87eb5b879f1c90
[ "MIT" ]
46,956
2015-01-01T08:48:41.000Z
2022-03-31T13:38:03.000Z
src/you_get/extractors/toutiao.py
adger-me/you-get
b32741ad8e57b3a02a1006d27e87eb5b879f1c90
[ "MIT" ]
2,477
2015-01-02T18:12:33.000Z
2022-03-31T10:44:24.000Z
src/you_get/extractors/toutiao.py
adger-me/you-get
b32741ad8e57b3a02a1006d27e87eb5b879f1c90
[ "MIT" ]
10,750
2015-01-03T01:33:16.000Z
2022-03-31T06:36:28.000Z
#!/usr/bin/env python import binascii import random from json import loads from urllib.parse import urlparse from ..common import * try: from base64 import decodebytes except ImportError: from base64 import decodestring decodebytes = decodestring __all__ = ['toutiao_download', ] def random_with_n_digits(n): return random.randint(10 ** (n - 1), (10 ** n) - 1) def sign_video_url(vid): r = str(random_with_n_digits(16)) url = 'https://ib.365yg.com/video/urls/v/1/toutiao/mp4/{vid}'.format(vid=vid) n = urlparse(url).path + '?r=' + r b_n = bytes(n, encoding="utf-8") s = binascii.crc32(b_n) aid = 1364 ts = int(time.time() * 1000) return url + '?r={r}&s={s}&aid={aid}&vfrom=xgplayer&callback=axiosJsonpCallback1&_={ts}'.format(r=r, s=s, aid=aid, ts=ts) class ToutiaoVideoInfo(object): def __init__(self): self.bitrate = None self.definition = None self.size = None self.height = None self.width = None self.type = None self.url = None def __str__(self): return json.dumps(self.__dict__) def get_file_by_vid(video_id): vRet = [] url = sign_video_url(video_id) ret = get_content(url) ret = loads(ret[20:-1]) vlist = ret.get('data').get('video_list') if len(vlist) > 0: vInfo = vlist.get(sorted(vlist.keys(), reverse=True)[0]) vUrl = vInfo.get('main_url') vUrl = decodebytes(vUrl.encode('ascii')).decode('ascii') videoInfo = ToutiaoVideoInfo() videoInfo.bitrate = vInfo.get('bitrate') videoInfo.definition = vInfo.get('definition') videoInfo.size = vInfo.get('size') videoInfo.height = vInfo.get('vheight') videoInfo.width = vInfo.get('vwidth') videoInfo.type = vInfo.get('vtype') videoInfo.url = vUrl vRet.append(videoInfo) return vRet def toutiao_download(url, output_dir='.', merge=True, info_only=False, **kwargs): html = get_html(url, faker=True) video_id = match1(html, r".*?videoId: '(?P<vid>.*)'") title = match1(html, '.*?<title>(?P<title>.*?)</title>') video_file_list = get_file_by_vid(video_id) # 调api获取视频源文件 type, ext, size = url_info(video_file_list[0].url, faker=True) 
print_info(site_info=site_info, title=title, type=type, size=size) if not info_only: download_urls([video_file_list[0].url], title, ext, size, output_dir, merge=merge, faker=True) site_info = "Toutiao.com" download = toutiao_download download_playlist = playlist_not_supported("toutiao")
30.666667
118
0.622939
import binascii import random from json import loads from urllib.parse import urlparse from ..common import * try: from base64 import decodebytes except ImportError: from base64 import decodestring decodebytes = decodestring __all__ = ['toutiao_download', ] def random_with_n_digits(n): return random.randint(10 ** (n - 1), (10 ** n) - 1) def sign_video_url(vid): r = str(random_with_n_digits(16)) url = 'https://ib.365yg.com/video/urls/v/1/toutiao/mp4/{vid}'.format(vid=vid) n = urlparse(url).path + '?r=' + r b_n = bytes(n, encoding="utf-8") s = binascii.crc32(b_n) aid = 1364 ts = int(time.time() * 1000) return url + '?r={r}&s={s}&aid={aid}&vfrom=xgplayer&callback=axiosJsonpCallback1&_={ts}'.format(r=r, s=s, aid=aid, ts=ts) class ToutiaoVideoInfo(object): def __init__(self): self.bitrate = None self.definition = None self.size = None self.height = None self.width = None self.type = None self.url = None def __str__(self): return json.dumps(self.__dict__) def get_file_by_vid(video_id): vRet = [] url = sign_video_url(video_id) ret = get_content(url) ret = loads(ret[20:-1]) vlist = ret.get('data').get('video_list') if len(vlist) > 0: vInfo = vlist.get(sorted(vlist.keys(), reverse=True)[0]) vUrl = vInfo.get('main_url') vUrl = decodebytes(vUrl.encode('ascii')).decode('ascii') videoInfo = ToutiaoVideoInfo() videoInfo.bitrate = vInfo.get('bitrate') videoInfo.definition = vInfo.get('definition') videoInfo.size = vInfo.get('size') videoInfo.height = vInfo.get('vheight') videoInfo.width = vInfo.get('vwidth') videoInfo.type = vInfo.get('vtype') videoInfo.url = vUrl vRet.append(videoInfo) return vRet def toutiao_download(url, output_dir='.', merge=True, info_only=False, **kwargs): html = get_html(url, faker=True) video_id = match1(html, r".*?videoId: '(?P<vid>.*)'") title = match1(html, '.*?<title>(?P<title>.*?)</title>') video_file_list = get_file_by_vid(video_id) type, ext, size = url_info(video_file_list[0].url, faker=True) print_info(site_info=site_info, title=title, type=type, 
size=size) if not info_only: download_urls([video_file_list[0].url], title, ext, size, output_dir, merge=merge, faker=True) site_info = "Toutiao.com" download = toutiao_download download_playlist = playlist_not_supported("toutiao")
true
true
1c3561068eda230b12b956d0a336505c8afd6252
4,758
py
Python
landlab/ca/little_ca_test.py
awickert/landlab
496de56717a5877db96f354a1b1285bfabe8b56f
[ "MIT" ]
1
2015-08-17T19:29:50.000Z
2015-08-17T19:29:50.000Z
landlab/ca/little_ca_test.py
awickert/landlab
496de56717a5877db96f354a1b1285bfabe8b56f
[ "MIT" ]
1
2018-04-07T08:24:56.000Z
2018-04-07T13:52:03.000Z
landlab/ca/little_ca_test.py
awickert/landlab
496de56717a5877db96f354a1b1285bfabe8b56f
[ "MIT" ]
2
2017-07-03T20:21:13.000Z
2018-09-06T23:58:19.000Z
#!/usr/env/python """Test the creation and execution of a CellLab-CTS model. Tests the creation and execution of a CellLab-CTS model, by creating a simple two-state CA on a small grid. Created by Greg Tucker, May 2015 """ from __future__ import print_function import time from landlab import RasterModelGrid from landlab.ca.celllab_cts import Transition, CAPlotter from landlab.ca.raster_cts import RasterCTS def setup_transition_list(): """ Creates and returns a list of Transition() objects to represent state transitions for a biased random walk, in which the rate of downward motion is greater than the rate in the other three directions. Returns ------- xn_list : list of Transition objects List of objects that encode information about the link-state transitions. Notes ----- This doesn't represent any particular process, but rather is simply used to test the CA code. The transition rules have 0-0 pairs transitioning to 0-1 or 1-0 pairs (50/50 chance) and thence to 1-1 pairs, at which point there are no further transitions. The states and transitions are as follows:: Pair state Transition to Process Rate (cells/s) ========== ============= ======= ============== 0 (0-0) 1 (0-1) 0.5 2 (1-0) 0.5 1 (0-1) 3 (1-1) 1.0 2 (1-0) 3 (1-1) 1.0 3 (1-1) (none) - """ # Create an empty transition list xn_list = [] # Append two transitions to the list. 
# Note that the arguments to the Transition() object constructor are: # - Tuple representing starting pair state # (left/bottom cell, right/top cell, orientation) # - Tuple representing new pair state # (left/bottom cell, right/top cell, orientation) # - Transition rate (cells per time step, in this case 1 sec) # - Name for transition xn_list.append(Transition((0, 0, 0), (0, 1, 0), 0.5, '')) xn_list.append(Transition((0, 0, 0), (1, 0, 0), 0.5, '')) xn_list.append(Transition((0, 1, 0), (1, 1, 0), 1., '')) xn_list.append(Transition((1, 0, 0), (1, 1, 0), 1., '')) return xn_list def main(): # INITIALIZE # User-defined parameters nr = 5 # number of rows in grid nc = 5 # number of columns in grid plot_interval = 10.0 # time interval for plotting, sec run_duration = 10.0 # duration of run, sec report_interval = 10.0 # report interval, in real-time seconds # Remember the clock time, and calculate when we next want to report # progress. current_real_time = time.time() next_report = current_real_time + report_interval # Create grid mg = RasterModelGrid(nr, nc, 1.0) # Make the boundaries be walls mg.set_closed_boundaries_at_grid_edges(True, True, True, True) # Set up the states and pair transitions. 
ns_dict = {0: 'black', 1: 'white'} xn_list = setup_transition_list() # Create the node-state array and attach it to the grid node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int) # For visual display purposes, set all boundary nodes to fluid node_state_grid[mg.closed_boundary_nodes] = 0 # Create the CA model ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid) # Create a CAPlotter object for handling screen display ca_plotter = CAPlotter(ca) # Plot the initial grid ca_plotter.update_plot() # RUN current_time = 0.0 while current_time < run_duration: # Once in a while, print out simulation and real time to let the user # know that the sim is running ok current_real_time = time.time() if current_real_time >= next_report: print('Current sim time', current_time, '(', 100 * current_time / run_duration, '%)') next_report = current_real_time + report_interval # Run the model forward in time until the next output step ca.run(current_time + plot_interval, ca.node_state, plot_each_transition=True, plotter=ca_plotter) current_time += plot_interval # Plot the current grid ca_plotter.update_plot() # FINALIZE # Plot ca_plotter.finalize() print('ok, here are the keys') print(ca.__dict__.keys()) # If user runs this file, activate the main() function if __name__ == "__main__": #import cProfile #fname = 'test_profiler_for_little_ca.txt' # cProfile.run('print main(); print') #, fname) main()
33.507042
78
0.617276
from __future__ import print_function import time from landlab import RasterModelGrid from landlab.ca.celllab_cts import Transition, CAPlotter from landlab.ca.raster_cts import RasterCTS def setup_transition_list(): xn_list = [] xn_list.append(Transition((0, 0, 0), (0, 1, 0), 0.5, '')) xn_list.append(Transition((0, 0, 0), (1, 0, 0), 0.5, '')) xn_list.append(Transition((0, 1, 0), (1, 1, 0), 1., '')) xn_list.append(Transition((1, 0, 0), (1, 1, 0), 1., '')) return xn_list def main(): nr = 5 nc = 5 plot_interval = 10.0 run_duration = 10.0 report_interval = 10.0 current_real_time = time.time() next_report = current_real_time + report_interval mg = RasterModelGrid(nr, nc, 1.0) mg.set_closed_boundaries_at_grid_edges(True, True, True, True) ns_dict = {0: 'black', 1: 'white'} xn_list = setup_transition_list() node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int) node_state_grid[mg.closed_boundary_nodes] = 0 ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid) ca_plotter = CAPlotter(ca) ca_plotter.update_plot() current_time = 0.0 while current_time < run_duration: current_real_time = time.time() if current_real_time >= next_report: print('Current sim time', current_time, '(', 100 * current_time / run_duration, '%)') next_report = current_real_time + report_interval ca.run(current_time + plot_interval, ca.node_state, plot_each_transition=True, plotter=ca_plotter) current_time += plot_interval ca_plotter.update_plot() ca_plotter.finalize() print('ok, here are the keys') print(ca.__dict__.keys()) if __name__ == "__main__": ()
true
true
1c356129c86245e6d6b2a1086167507fd3b53e4d
5,010
py
Python
src/engine/SCons/Tool/suncxx.py
Valkatraz/scons
5e70c65f633dcecc035751c9f0c6f894088df8a0
[ "MIT" ]
1
2020-03-21T05:24:47.000Z
2020-03-21T05:24:47.000Z
src/engine/SCons/Tool/suncxx.py
Valkatraz/scons
5e70c65f633dcecc035751c9f0c6f894088df8a0
[ "MIT" ]
3
2019-01-15T20:40:02.000Z
2021-02-13T03:16:34.000Z
src/engine/SCons/Tool/suncxx.py
Valkatraz/scons
5e70c65f633dcecc035751c9f0c6f894088df8a0
[ "MIT" ]
1
2021-08-04T12:32:39.000Z
2021-08-04T12:32:39.000Z
"""SCons.Tool.sunc++ Tool-specific initialization for C++ on SunOS / Solaris. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import SCons import os import re import subprocess import SCons.Tool.cxx cplusplus = SCons.Tool.cxx # cplusplus = __import__('c++', globals(), locals(), []) package_info = {} def get_package_info(package_name, pkginfo, pkgchk): try: return package_info[package_name] except KeyError: version = None pathname = None from subprocess import DEVNULL try: with open('/var/sadm/install/contents', 'r') as f: sadm_contents = f.read() except EnvironmentError: pass else: sadm_re = re.compile(r'^(\S*/bin/CC)(=\S*)? 
%s$' % package_name, re.M) sadm_match = sadm_re.search(sadm_contents) if sadm_match: pathname = os.path.dirname(sadm_match.group(1)) try: popen_args = {'stdout': subprocess.PIPE, 'stderr': DEVNULL} popen_args['universal_newlines'] = True p = subprocess.Popen([pkginfo, '-l', package_name], **popen_args) except EnvironmentError: pass else: pkginfo_contents = p.communicate()[0] pkginfo_contents.decode() version_re = re.compile(r'^ *VERSION:\s*(.*)$', re.M) version_match = version_re.search(pkginfo_contents) if version_match: version = version_match.group(1) if pathname is None: try: popen_args = {'stdout': subprocess.PIPE, 'stderr': DEVNULL} popen_args['universal_newlines'] = True p = subprocess.Popen([pkgchk, '-l', package_name], **popen_args) except EnvironmentError: pass else: pkgchk_contents = p.communicate()[0] pkgchk_contents.decode() pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M) pathname_match = pathname_re.search(pkgchk_contents) if pathname_match: pathname = os.path.dirname(pathname_match.group(1)) package_info[package_name] = (pathname, version) return package_info[package_name] # use the package installer tool "pkg" to figure out where cppc and what # version of it is installed def get_cppc(env): cxx = env.subst('$CXX') if cxx: cppcPath = os.path.dirname(cxx) else: cppcPath = None cppcVersion = None pkginfo = env.subst('$PKGINFO') pkgchk = env.subst('$PKGCHK') for package in ['SPROcpl']: path, version = get_package_info(package, pkginfo, pkgchk) if path and version: cppcPath, cppcVersion = path, version break return (cppcPath, 'CC', 'CC', cppcVersion) def generate(env): """Add Builders and construction variables for SunPRO C++.""" path, cxx, shcxx, version = get_cppc(env) if path: cxx = os.path.join(path, cxx) shcxx = os.path.join(path, shcxx) cplusplus.generate(env) env['CXX'] = cxx env['SHCXX'] = shcxx env['CXXVERSION'] = version env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC') env['SHOBJPREFIX'] = 'so_' env['SHOBJSUFFIX'] = '.o' def 
exists(env): path, cxx, shcxx, version = get_cppc(env) if path and cxx: cppc = os.path.join(path, cxx) if os.path.exists(cppc): return cppc return None # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
31.708861
82
0.622156
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" import SCons import os import re import subprocess import SCons.Tool.cxx cplusplus = SCons.Tool.cxx package_info = {} def get_package_info(package_name, pkginfo, pkgchk): try: return package_info[package_name] except KeyError: version = None pathname = None from subprocess import DEVNULL try: with open('/var/sadm/install/contents', 'r') as f: sadm_contents = f.read() except EnvironmentError: pass else: sadm_re = re.compile(r'^(\S*/bin/CC)(=\S*)? %s$' % package_name, re.M) sadm_match = sadm_re.search(sadm_contents) if sadm_match: pathname = os.path.dirname(sadm_match.group(1)) try: popen_args = {'stdout': subprocess.PIPE, 'stderr': DEVNULL} popen_args['universal_newlines'] = True p = subprocess.Popen([pkginfo, '-l', package_name], **popen_args) except EnvironmentError: pass else: pkginfo_contents = p.communicate()[0] pkginfo_contents.decode() version_re = re.compile(r'^ *VERSION:\s*(.*)$', re.M) version_match = version_re.search(pkginfo_contents) if version_match: version = version_match.group(1) if pathname is None: try: popen_args = {'stdout': subprocess.PIPE, 'stderr': DEVNULL} popen_args['universal_newlines'] = True p = subprocess.Popen([pkgchk, '-l', package_name], **popen_args) except EnvironmentError: pass else: pkgchk_contents = p.communicate()[0] pkgchk_contents.decode() pathname_re = re.compile(r'^Pathname:\s*(.*/bin/CC)$', re.M) pathname_match = pathname_re.search(pkgchk_contents) if pathname_match: pathname = os.path.dirname(pathname_match.group(1)) package_info[package_name] = (pathname, version) return package_info[package_name] def get_cppc(env): cxx = env.subst('$CXX') if cxx: cppcPath = os.path.dirname(cxx) else: cppcPath = None cppcVersion = None pkginfo = env.subst('$PKGINFO') pkgchk = env.subst('$PKGCHK') for package in ['SPROcpl']: path, version = get_package_info(package, pkginfo, pkgchk) if path and version: cppcPath, cppcVersion = path, version break return (cppcPath, 'CC', 'CC', 
cppcVersion) def generate(env): path, cxx, shcxx, version = get_cppc(env) if path: cxx = os.path.join(path, cxx) shcxx = os.path.join(path, shcxx) cplusplus.generate(env) env['CXX'] = cxx env['SHCXX'] = shcxx env['CXXVERSION'] = version env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -KPIC') env['SHOBJPREFIX'] = 'so_' env['SHOBJSUFFIX'] = '.o' def exists(env): path, cxx, shcxx, version = get_cppc(env) if path and cxx: cppc = os.path.join(path, cxx) if os.path.exists(cppc): return cppc return None
true
true
1c3561e7b874b6135c54fef21f4e3e4264ca964d
844
py
Python
example/tests/test_generic_validation.py
sha016/django-rest-framework-json-api
c4337f15f59811c4e72f33cf6eb3a4667cb4b197
[ "BSD-2-Clause" ]
1,011
2015-07-23T00:39:13.000Z
2022-03-25T11:05:08.000Z
example/tests/test_generic_validation.py
sha016/django-rest-framework-json-api
c4337f15f59811c4e72f33cf6eb3a4667cb4b197
[ "BSD-2-Clause" ]
819
2015-07-21T13:43:30.000Z
2022-03-20T22:01:51.000Z
example/tests/test_generic_validation.py
sha016/django-rest-framework-json-api
c4337f15f59811c4e72f33cf6eb3a4667cb4b197
[ "BSD-2-Clause" ]
345
2015-07-21T14:29:26.000Z
2022-03-22T03:25:04.000Z
from django.urls import reverse from example.tests import TestBase class GenericValidationTest(TestBase): """ Test that a non serializer specific validation can be thrown and formatted """ def setUp(self): super().setUp() self.url = reverse("user-validation", kwargs={"pk": self.miles.pk}) def test_generic_validation_error(self): """ Check error formatting """ response = self.client.get(self.url) self.assertEqual(response.status_code, 400) expected = { "errors": [ { "status": "400", "source": {"pointer": "/data"}, "detail": "Oh nohs!", "code": "invalid", } ] } assert expected == response.json()
24.823529
78
0.513033
from django.urls import reverse from example.tests import TestBase class GenericValidationTest(TestBase): def setUp(self): super().setUp() self.url = reverse("user-validation", kwargs={"pk": self.miles.pk}) def test_generic_validation_error(self): response = self.client.get(self.url) self.assertEqual(response.status_code, 400) expected = { "errors": [ { "status": "400", "source": {"pointer": "/data"}, "detail": "Oh nohs!", "code": "invalid", } ] } assert expected == response.json()
true
true
1c35621a48378e7e3a0b61da50a1d27e81d7655e
701
py
Python
migrations/versions/53ceac12ff90_.py
excalibur1987/team-management
ed6dfaf83280dad947edb31b404680d6083d7e62
[ "MIT" ]
1
2021-06-05T16:18:10.000Z
2021-06-05T16:18:10.000Z
migrations/versions/53ceac12ff90_.py
excalibur1987/flask-api-backend-boilerplate
3f0933599f12b9632a3fc697eb3dde534ec93ce1
[ "MIT" ]
null
null
null
migrations/versions/53ceac12ff90_.py
excalibur1987/flask-api-backend-boilerplate
3f0933599f12b9632a3fc697eb3dde534ec93ce1
[ "MIT" ]
null
null
null
"""empty message Revision ID: 53ceac12ff90 Revises: 516ff6933e39 Create Date: 2021-05-09 13:37:35.869550 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "53ceac12ff90" down_revision = "ff1d1d2b820d" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column( "roles", sa.Column("description", sa.String(), server_default="", nullable=False), ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column("roles", "description") # ### end Alembic commands ###
22.612903
81
0.673324
import sqlalchemy as sa from alembic import op revision = "53ceac12ff90" down_revision = "ff1d1d2b820d" branch_labels = None depends_on = None def upgrade():
true
true
1c3562ada2bdfb7e4f0e742bb9c9d2cbc170b385
6,163
py
Python
third_party/catapult/telemetry/telemetry/internal/browser/browser_finder.py
zipated/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
third_party/catapult/telemetry/telemetry/internal/browser/browser_finder.py
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
third_party/catapult/telemetry/telemetry/internal/browser/browser_finder.py
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Finds browsers that can be controlled by telemetry.""" import logging from telemetry import decorators from telemetry.internal.backends.chrome import android_browser_finder from telemetry.internal.backends.chrome import cros_browser_finder from telemetry.internal.backends.chrome import desktop_browser_finder from telemetry.internal.browser import browser_finder_exceptions from telemetry.internal.platform import device_finder BROWSER_FINDERS = [ desktop_browser_finder, android_browser_finder, cros_browser_finder, ] def FindAllBrowserTypes(options): browsers = [] for bf in BROWSER_FINDERS: browsers.extend(bf.FindAllBrowserTypes(options)) return browsers @decorators.Cache def FindBrowser(options): """Finds the best PossibleBrowser object given a BrowserOptions object. Args: A BrowserOptions object. Returns: A PossibleBrowser object. Raises: BrowserFinderException: Options improperly set, or an error occurred. 
""" if options.__class__.__name__ == '_FakeBrowserFinderOptions': return options.fake_possible_browser if options.browser_type == 'exact' and options.browser_executable == None: raise browser_finder_exceptions.BrowserFinderException( '--browser=exact requires --browser-executable to be set.') if options.browser_type != 'exact' and options.browser_executable != None: raise browser_finder_exceptions.BrowserFinderException( '--browser-executable requires --browser=exact.') if options.browser_type == 'cros-chrome' and options.cros_remote == None: raise browser_finder_exceptions.BrowserFinderException( 'browser_type=cros-chrome requires cros_remote be set.') if (options.browser_type != 'cros-chrome' and options.browser_type != 'cros-chrome-guest' and options.cros_remote != None): raise browser_finder_exceptions.BrowserFinderException( '--remote requires --browser=cros-chrome or cros-chrome-guest.') devices = device_finder.GetDevicesMatchingOptions(options) browsers = [] default_browsers = [] for device in devices: for finder in BROWSER_FINDERS: if(options.browser_type and options.browser_type != 'any' and options.browser_type not in finder.FindAllBrowserTypes(options)): continue curr_browsers = finder.FindAllAvailableBrowsers(options, device) new_default_browser = finder.SelectDefaultBrowser(curr_browsers) if new_default_browser: default_browsers.append(new_default_browser) browsers.extend(curr_browsers) if options.browser_type == None: if default_browsers: default_browser = sorted(default_browsers, key=lambda b: b.last_modification_time)[-1] logging.warning('--browser omitted. Using most recent local build: %s', default_browser.browser_type) default_browser.UpdateExecutableIfNeeded() return default_browser if len(browsers) == 1: logging.warning('--browser omitted. 
Using only available browser: %s', browsers[0].browser_type) browsers[0].UpdateExecutableIfNeeded() return browsers[0] raise browser_finder_exceptions.BrowserTypeRequiredException( '--browser must be specified. Available browsers:\n%s' % '\n'.join(sorted(set([b.browser_type for b in browsers])))) if options.browser_type == 'any': types = FindAllBrowserTypes(options) def CompareBrowsersOnTypePriority(x, y): x_idx = types.index(x.browser_type) y_idx = types.index(y.browser_type) return x_idx - y_idx browsers.sort(CompareBrowsersOnTypePriority) if len(browsers) >= 1: browsers[0].UpdateExecutableIfNeeded() return browsers[0] else: return None matching_browsers = [ b for b in browsers if b.browser_type == options.browser_type and b.SupportsOptions(options.browser_options)] chosen_browser = None if len(matching_browsers) == 1: chosen_browser = matching_browsers[0] elif len(matching_browsers) > 1: logging.warning('Multiple browsers of the same type found: %s', repr(matching_browsers)) chosen_browser = sorted(matching_browsers, key=lambda b: b.last_modification_time)[-1] if chosen_browser: logging.info('Chose browser: %s', repr(chosen_browser)) chosen_browser.UpdateExecutableIfNeeded() return chosen_browser @decorators.Cache def GetAllAvailableBrowsers(options, device): """Returns a list of available browsers on the device. Args: options: A BrowserOptions object. device: The target device, which can be None. Returns: A list of browser instances. Raises: BrowserFinderException: Options are improperly set, or an error occurred. """ if not device: return [] possible_browsers = [] for browser_finder in BROWSER_FINDERS: possible_browsers.extend( browser_finder.FindAllAvailableBrowsers(options, device)) return possible_browsers @decorators.Cache def GetAllAvailableBrowserTypes(options): """Returns a list of available browser types. Args: options: A BrowserOptions object. Returns: A list of browser type strings. 
Raises: BrowserFinderException: Options are improperly set, or an error occurred. """ devices = device_finder.GetDevicesMatchingOptions(options) possible_browsers = [] for device in devices: possible_browsers.extend(GetAllAvailableBrowsers(options, device)) type_list = set([browser.browser_type for browser in possible_browsers]) # The reference build should be available for mac, linux and win, but the # desktop browser finder won't return it in the list of browsers. for browser in possible_browsers: if (browser.target_os == 'darwin' or browser.target_os.startswith('linux') or browser.target_os.startswith('win')): type_list.add('reference') break type_list = list(type_list) type_list.sort() return type_list
34.238889
78
0.730651
import logging from telemetry import decorators from telemetry.internal.backends.chrome import android_browser_finder from telemetry.internal.backends.chrome import cros_browser_finder from telemetry.internal.backends.chrome import desktop_browser_finder from telemetry.internal.browser import browser_finder_exceptions from telemetry.internal.platform import device_finder BROWSER_FINDERS = [ desktop_browser_finder, android_browser_finder, cros_browser_finder, ] def FindAllBrowserTypes(options): browsers = [] for bf in BROWSER_FINDERS: browsers.extend(bf.FindAllBrowserTypes(options)) return browsers @decorators.Cache def FindBrowser(options): if options.__class__.__name__ == '_FakeBrowserFinderOptions': return options.fake_possible_browser if options.browser_type == 'exact' and options.browser_executable == None: raise browser_finder_exceptions.BrowserFinderException( '--browser=exact requires --browser-executable to be set.') if options.browser_type != 'exact' and options.browser_executable != None: raise browser_finder_exceptions.BrowserFinderException( '--browser-executable requires --browser=exact.') if options.browser_type == 'cros-chrome' and options.cros_remote == None: raise browser_finder_exceptions.BrowserFinderException( 'browser_type=cros-chrome requires cros_remote be set.') if (options.browser_type != 'cros-chrome' and options.browser_type != 'cros-chrome-guest' and options.cros_remote != None): raise browser_finder_exceptions.BrowserFinderException( '--remote requires --browser=cros-chrome or cros-chrome-guest.') devices = device_finder.GetDevicesMatchingOptions(options) browsers = [] default_browsers = [] for device in devices: for finder in BROWSER_FINDERS: if(options.browser_type and options.browser_type != 'any' and options.browser_type not in finder.FindAllBrowserTypes(options)): continue curr_browsers = finder.FindAllAvailableBrowsers(options, device) new_default_browser = finder.SelectDefaultBrowser(curr_browsers) if new_default_browser: 
default_browsers.append(new_default_browser) browsers.extend(curr_browsers) if options.browser_type == None: if default_browsers: default_browser = sorted(default_browsers, key=lambda b: b.last_modification_time)[-1] logging.warning('--browser omitted. Using most recent local build: %s', default_browser.browser_type) default_browser.UpdateExecutableIfNeeded() return default_browser if len(browsers) == 1: logging.warning('--browser omitted. Using only available browser: %s', browsers[0].browser_type) browsers[0].UpdateExecutableIfNeeded() return browsers[0] raise browser_finder_exceptions.BrowserTypeRequiredException( '--browser must be specified. Available browsers:\n%s' % '\n'.join(sorted(set([b.browser_type for b in browsers])))) if options.browser_type == 'any': types = FindAllBrowserTypes(options) def CompareBrowsersOnTypePriority(x, y): x_idx = types.index(x.browser_type) y_idx = types.index(y.browser_type) return x_idx - y_idx browsers.sort(CompareBrowsersOnTypePriority) if len(browsers) >= 1: browsers[0].UpdateExecutableIfNeeded() return browsers[0] else: return None matching_browsers = [ b for b in browsers if b.browser_type == options.browser_type and b.SupportsOptions(options.browser_options)] chosen_browser = None if len(matching_browsers) == 1: chosen_browser = matching_browsers[0] elif len(matching_browsers) > 1: logging.warning('Multiple browsers of the same type found: %s', repr(matching_browsers)) chosen_browser = sorted(matching_browsers, key=lambda b: b.last_modification_time)[-1] if chosen_browser: logging.info('Chose browser: %s', repr(chosen_browser)) chosen_browser.UpdateExecutableIfNeeded() return chosen_browser @decorators.Cache def GetAllAvailableBrowsers(options, device): if not device: return [] possible_browsers = [] for browser_finder in BROWSER_FINDERS: possible_browsers.extend( browser_finder.FindAllAvailableBrowsers(options, device)) return possible_browsers @decorators.Cache def GetAllAvailableBrowserTypes(options): devices = 
device_finder.GetDevicesMatchingOptions(options) possible_browsers = [] for device in devices: possible_browsers.extend(GetAllAvailableBrowsers(options, device)) type_list = set([browser.browser_type for browser in possible_browsers]) for browser in possible_browsers: if (browser.target_os == 'darwin' or browser.target_os.startswith('linux') or browser.target_os.startswith('win')): type_list.add('reference') break type_list = list(type_list) type_list.sort() return type_list
true
true
1c35643725c62acf5a6e83ad13f91afbf70ad8f1
141,514
py
Python
black.py
springstan/black
4b449e7471c31ae2d3a890510322c40594cacc8f
[ "MIT" ]
1
2019-12-04T02:19:18.000Z
2019-12-04T02:19:18.000Z
black.py
springstan/black
4b449e7471c31ae2d3a890510322c40594cacc8f
[ "MIT" ]
2
2021-02-02T22:55:26.000Z
2021-06-02T02:26:29.000Z
black.py
springstan/black
4b449e7471c31ae2d3a890510322c40594cacc8f
[ "MIT" ]
null
null
null
import ast import asyncio from concurrent.futures import Executor, ProcessPoolExecutor from contextlib import contextmanager from datetime import datetime from enum import Enum from functools import lru_cache, partial, wraps import io import itertools import logging from multiprocessing import Manager, freeze_support import os from pathlib import Path import pickle import regex as re import signal import sys import tempfile import tokenize import traceback from typing import ( Any, Callable, Collection, Dict, Generator, Generic, Iterable, Iterator, List, Optional, Pattern, Sequence, Set, Tuple, TypeVar, Union, cast, ) from typing_extensions import Final from mypy_extensions import mypyc_attr from appdirs import user_cache_dir from dataclasses import dataclass, field, replace import click import toml from typed_ast import ast3, ast27 from pathspec import PathSpec # lib2to3 fork from blib2to3.pytree import Node, Leaf, type_repr from blib2to3 import pygram, pytree from blib2to3.pgen2 import driver, token from blib2to3.pgen2.grammar import Grammar from blib2to3.pgen2.parse import ParseError from _black_version import version as __version__ DEFAULT_LINE_LENGTH = 88 DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950 DEFAULT_INCLUDES = r"\.pyi?$" CACHE_DIR = Path(user_cache_dir("black", version=__version__)) # types FileContent = str Encoding = str NewLine = str Depth = int NodeType = int LeafID = int Priority = int Index = int LN = Union[Leaf, Node] SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]] Timestamp = float FileSize = int CacheInfo = Tuple[Timestamp, FileSize] Cache = Dict[Path, CacheInfo] out = partial(click.secho, bold=True, err=True) err = partial(click.secho, fg="red", err=True) pygram.initialize(CACHE_DIR) syms = pygram.python_symbols class NothingChanged(UserWarning): """Raised when reformatted code is the same as source.""" class CannotSplit(Exception): """A 
class WriteBack(Enum):
    """What to do with the result of reformatting a file."""

    NO = 0
    YES = 1
    DIFF = 2
    CHECK = 3

    @classmethod
    def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack":
        """Translate the --check/--diff CLI flags into a WriteBack mode."""
        if diff:
            return cls.DIFF
        return cls.CHECK if check else cls.YES


class Changed(Enum):
    """Whether (and how) a file changed during a reformatting run."""

    NO = 0
    CACHED = 1
    YES = 2


class TargetVersion(Enum):
    """Python versions Black can target.

    Values sort by release order, which sorted(..., key=lambda v: v.value)
    elsewhere relies on.
    """

    PY27 = 2
    PY33 = 3
    PY34 = 4
    PY35 = 5
    PY36 = 6
    PY37 = 7
    PY38 = 8

    def is_python2(self) -> bool:
        """Return True only for the single Python 2 target."""
        return self is TargetVersion.PY27


# The set of targets selected by the deprecated --py36 flag.
PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}
def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
    """Return True if `feature` is available in every requested target version.

    Vacuously true for an empty set of target versions.
    """
    for version in target_versions:
        if feature not in VERSION_TO_FEATURES[version]:
            return False
    return True
""" return [TargetVersion[val.upper()] for val in v] @click.command(context_settings=dict(help_option_names=["-h", "--help"])) @click.option("-c", "--code", type=str, help="Format the code passed in as a string.") @click.option( "-l", "--line-length", type=int, default=DEFAULT_LINE_LENGTH, help="How many characters per line to allow.", show_default=True, ) @click.option( "-t", "--target-version", type=click.Choice([v.name.lower() for v in TargetVersion]), callback=target_version_option_callback, multiple=True, help=( "Python versions that should be supported by Black's output. [default: " "per-file auto-detection]" ), ) @click.option( "--py36", is_flag=True, help=( "Allow using Python 3.6-only syntax on all input files. This will put " "trailing commas in function signatures and calls also after *args and " "**kwargs. Deprecated; use --target-version instead. " "[default: per-file auto-detection]" ), ) @click.option( "--pyi", is_flag=True, help=( "Format all input files like typing stubs regardless of file extension " "(useful when piping source on standard input)." ), ) @click.option( "-S", "--skip-string-normalization", is_flag=True, help="Don't normalize string quotes or prefixes.", ) @click.option( "--check", is_flag=True, help=( "Don't write the files back, just return the status. Return code 0 " "means nothing would change. Return code 1 means some files would be " "reformatted. Return code 123 means there was an internal error." ), ) @click.option( "--diff", is_flag=True, help="Don't write the files back, just output a diff for each file on stdout.", ) @click.option( "--fast/--safe", is_flag=True, help="If --fast given, skip temporary sanity checks. [default: --safe]", ) @click.option( "--include", type=str, default=DEFAULT_INCLUDES, help=( "A regular expression that matches files and directories that should be " "included on recursive searches. An empty value means all files are " "included regardless of the name. 
Use forward slashes for directories on " "all platforms (Windows, too). Exclusions are calculated first, inclusions " "later." ), show_default=True, ) @click.option( "--exclude", type=str, default=DEFAULT_EXCLUDES, help=( "A regular expression that matches files and directories that should be " "excluded on recursive searches. An empty value means no paths are excluded. " "Use forward slashes for directories on all platforms (Windows, too). " "Exclusions are calculated first, inclusions later." ), show_default=True, ) @click.option( "-q", "--quiet", is_flag=True, help=( "Don't emit non-error messages to stderr. Errors are still emitted; " "silence those with 2>/dev/null." ), ) @click.option( "-v", "--verbose", is_flag=True, help=( "Also emit messages to stderr about files that were not changed or were " "ignored due to --exclude=." ), ) @click.version_option(version=__version__) @click.argument( "src", nargs=-1, type=click.Path( exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True ), is_eager=True, ) @click.option( "--config", type=click.Path( exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False ), is_eager=True, callback=read_pyproject_toml, help="Read configuration from PATH.", ) @click.pass_context def main( ctx: click.Context, code: Optional[str], line_length: int, target_version: List[TargetVersion], check: bool, diff: bool, fast: bool, pyi: bool, py36: bool, skip_string_normalization: bool, quiet: bool, verbose: bool, include: str, exclude: str, src: Tuple[str, ...], config: Optional[str], ) -> None: """The uncompromising code formatter.""" write_back = WriteBack.from_configuration(check=check, diff=diff) if target_version: if py36: err(f"Cannot use both --target-version and --py36") ctx.exit(2) else: versions = set(target_version) elif py36: err( "--py36 is deprecated and will be removed in a future version. " "Use --target-version py36 instead." ) versions = PY36_VERSIONS else: # We'll autodetect later. 
def path_empty(
    src: Tuple[str, ...], quiet: bool, verbose: bool, ctx: click.Context
) -> None:
    """Exit the CLI early (status 0) when no `src` paths were provided."""
    if src:
        return
    if verbose or not quiet:
        out("No Path provided. Nothing to do 😴")
    ctx.exit(0)
def reformat_many(
    sources: Set[Path],
    fast: bool,
    write_back: WriteBack,
    mode: FileMode,
    report: "Report",
) -> None:
    """Reformat multiple files using a ProcessPoolExecutor.

    `fast`, `write_back`, and `mode` are forwarded to
    :func:`format_file_in_place` via :func:`schedule_formatting`;
    per-file outcomes are recorded on `report`.
    """
    loop = asyncio.get_event_loop()
    # os.cpu_count() is documented to return None when the count cannot be
    # determined; fall back to a single worker so min() below never receives
    # None (which would raise TypeError on Windows).
    worker_count = os.cpu_count() or 1
    if sys.platform == "win32":
        # Work around https://bugs.python.org/issue26903
        worker_count = min(worker_count, 61)
    executor = ProcessPoolExecutor(max_workers=worker_count)
    try:
        loop.run_until_complete(
            schedule_formatting(
                sources=sources,
                fast=fast,
                write_back=write_back,
                mode=mode,
                report=report,
                loop=loop,
                executor=executor,
            )
        )
    finally:
        shutdown(loop)
        executor.shutdown()
""" cache: Cache = {} if write_back != WriteBack.DIFF: cache = read_cache(mode) sources, cached = filter_cached(cache, sources) for src in sorted(cached): report.done(src, Changed.CACHED) if not sources: return cancelled = [] sources_to_cache = [] lock = None if write_back == WriteBack.DIFF: # For diff output, we need locks to ensure we don't interleave output # from different processes. manager = Manager() lock = manager.Lock() tasks = { asyncio.ensure_future( loop.run_in_executor( executor, format_file_in_place, src, fast, mode, write_back, lock ) ): src for src in sorted(sources) } pending: Iterable["asyncio.Future[bool]"] = tasks.keys() try: loop.add_signal_handler(signal.SIGINT, cancel, pending) loop.add_signal_handler(signal.SIGTERM, cancel, pending) except NotImplementedError: # There are no good alternatives for these on Windows. pass while pending: done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) for task in done: src = tasks.pop(task) if task.cancelled(): cancelled.append(task) elif task.exception(): report.failed(src, str(task.exception())) else: changed = Changed.YES if task.result() else Changed.NO # If the file was written back or was successfully checked as # well-formatted, store this information in the cache. if write_back is WriteBack.YES or ( write_back is WriteBack.CHECK and changed is Changed.NO ): sources_to_cache.append(src) report.done(src, changed) if cancelled: await asyncio.gather(*cancelled, loop=loop, return_exceptions=True) if sources_to_cache: write_cache(cache, sources_to_cache, mode) def format_file_in_place( src: Path, fast: bool, mode: FileMode, write_back: WriteBack = WriteBack.NO, lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy ) -> bool: """Format file under `src` path. Return True if changed. If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted code to the file. `mode` and `fast` options are passed to :func:`format_file_contents`. 
def format_stdin_to_stdout(
    fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: FileMode
) -> bool:
    """Format file on stdin. Return True if changed.

    If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
    write a diff to stdout. The `mode` argument is passed to
    :func:`format_file_contents`.
    """
    then = datetime.utcnow()
    src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
    # Default to the unmodified source so the `finally` block always has
    # something to emit, even if formatting raises.
    dst = src
    try:
        dst = format_file_contents(src, fast=fast, mode=mode)
        return True
    except NothingChanged:
        return False
    finally:
        # Re-wrap stdout's raw buffer so the output reuses the input's
        # encoding and newline style.
        f = io.TextIOWrapper(
            sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
        )
        if write_back == WriteBack.YES:
            f.write(dst)
        elif write_back == WriteBack.DIFF:
            now = datetime.utcnow()
            src_name = f"STDIN\t{then} +0000"
            dst_name = f"STDOUT\t{now} +0000"
            f.write(diff(src, dst, src_name, dst_name))
        # Detach the wrapper so its destruction doesn't close stdout's buffer.
        f.detach()
def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
    """Decode `src` and return (decoded_contents, encoding, newline).

    The newline marker is CRLF when the first line of `src` ends with one and
    LF otherwise; `decoded_contents` itself is decoded with universal
    newlines, i.e. it only ever contains LF.
    """
    raw = io.BytesIO(src)
    encoding, first_lines = tokenize.detect_encoding(raw.readline)
    if not first_lines:
        # Empty input: nothing to sniff a newline style from.
        return "", encoding, "\n"
    newline = "\r\n" if first_lines[0].endswith(b"\r\n") else "\n"
    raw.seek(0)
    with io.TextIOWrapper(raw, encoding) as wrapper:
        return wrapper.read(), encoding, newline
def lib2to3_unparse(node: Node) -> str:
    """Given a lib2to3 node, return its string representation."""
    return str(node)
""" if node.type < 256: name = token.tok_name[node.type] else: name = str(type_repr(node.type)) # We explicitly branch on whether a visitor exists (instead of # using self.visit_default as the default arg to getattr) in order # to save needing to create a bound method object and so mypyc can # generate a native call to visit_default. visitf = getattr(self, f"visit_{name}", None) if visitf: yield from visitf(node) else: yield from self.visit_default(node) def visit_default(self, node: LN) -> Iterator[T]: """Default `visit_*()` implementation. Recurses to children of `node`.""" if isinstance(node, Node): for child in node.children: yield from self.visit(child) @dataclass class DebugVisitor(Visitor[T]): tree_depth: int = 0 def visit_default(self, node: LN) -> Iterator[T]: indent = " " * (2 * self.tree_depth) if isinstance(node, Node): _type = type_repr(node.type) out(f"{indent}{_type}", fg="yellow") self.tree_depth += 1 for child in node.children: yield from self.visit(child) self.tree_depth -= 1 out(f"{indent}/{_type}", fg="yellow", bold=False) else: _type = token.tok_name.get(node.type, str(node.type)) out(f"{indent}{_type}", fg="blue", nl=False) if node.prefix: # We don't have to handle prefixes for `Node` objects since # that delegates to the first child anyway. out(f" {node.prefix!r}", fg="green", bold=False, nl=False) out(f" {node.value!r}", fg="blue", bold=False) @classmethod def show(cls, code: Union[str, Leaf, Node]) -> None: """Pretty-print the lib2to3 AST of a given string of `code`. Convenience method for debugging. 
""" v: DebugVisitor[None] = DebugVisitor() if isinstance(code, str): code = lib2to3_parse(code) list(v.visit(code)) WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} STATEMENT: Final = { syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt, syms.except_clause, syms.with_stmt, syms.funcdef, syms.classdef, } STANDALONE_COMMENT: Final = 153 token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" LOGIC_OPERATORS: Final = {"and", "or"} COMPARATORS: Final = { token.LESS, token.GREATER, token.EQEQUAL, token.NOTEQUAL, token.LESSEQUAL, token.GREATEREQUAL, } MATH_OPERATORS: Final = { token.VBAR, token.CIRCUMFLEX, token.AMPER, token.LEFTSHIFT, token.RIGHTSHIFT, token.PLUS, token.MINUS, token.STAR, token.SLASH, token.DOUBLESLASH, token.PERCENT, token.AT, token.TILDE, token.DOUBLESTAR, } STARS: Final = {token.STAR, token.DOUBLESTAR} VARARGS_SPECIALS: Final = STARS | {token.SLASH} VARARGS_PARENTS: Final = { syms.arglist, syms.argument, # double star in arglist syms.trailer, # single argument to call syms.typedargslist, syms.varargslist, # lambdas } UNPACKING_PARENTS: Final = { syms.atom, # single element of a list or set literal syms.dictsetmaker, syms.listmaker, syms.testlist_gexp, syms.testlist_star_expr, } TEST_DESCENDANTS: Final = { syms.test, syms.lambdef, syms.or_test, syms.and_test, syms.not_test, syms.comparison, syms.star_expr, syms.expr, syms.xor_expr, syms.and_expr, syms.shift_expr, syms.arith_expr, syms.trailer, syms.term, syms.power, } ASSIGNMENTS: Final = { "=", "+=", "-=", "*=", "@=", "/=", "%=", "&=", "|=", "^=", "<<=", ">>=", "**=", "//=", } COMPREHENSION_PRIORITY: Final = 20 COMMA_PRIORITY: Final = 18 TERNARY_PRIORITY: Final = 16 LOGIC_PRIORITY: Final = 14 STRING_PRIORITY: Final = 12 COMPARATOR_PRIORITY: Final = 10 MATH_PRIORITIES: Final = { token.VBAR: 9, token.CIRCUMFLEX: 8, token.AMPER: 7, token.LEFTSHIFT: 6, token.RIGHTSHIFT: 6, token.PLUS: 5, token.MINUS: 5, token.STAR: 4, token.SLASH: 4, token.DOUBLESLASH: 4, token.PERCENT: 4, 
token.AT: 4, token.TILDE: 3, token.DOUBLESTAR: 2, } DOT_PRIORITY: Final = 1 @dataclass class BracketTracker: """Keeps track of brackets on a line.""" depth: int = 0 bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) delimiters: Dict[LeafID, Priority] = field(default_factory=dict) previous: Optional[Leaf] = None _for_loop_depths: List[int] = field(default_factory=list) _lambda_argument_depths: List[int] = field(default_factory=list) def mark(self, leaf: Leaf) -> None: """Mark `leaf` with bracket-related metadata. Keep track of delimiters. All leaves receive an int `bracket_depth` field that stores how deep within brackets a given leaf is. 0 means there are no enclosing brackets that started on this line. If a leaf is itself a closing bracket, it receives an `opening_bracket` field that it forms a pair with. This is a one-directional link to avoid reference cycles. If a leaf is a delimiter (a token on which Black can split the line if needed) and it's on depth 0, its `id()` is stored in the tracker's `delimiters` field. 
""" if leaf.type == token.COMMENT: return self.maybe_decrement_after_for_loop_variable(leaf) self.maybe_decrement_after_lambda_arguments(leaf) if leaf.type in CLOSING_BRACKETS: self.depth -= 1 opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) leaf.opening_bracket = opening_bracket leaf.bracket_depth = self.depth if self.depth == 0: delim = is_split_before_delimiter(leaf, self.previous) if delim and self.previous is not None: self.delimiters[id(self.previous)] = delim else: delim = is_split_after_delimiter(leaf, self.previous) if delim: self.delimiters[id(leaf)] = delim if leaf.type in OPENING_BRACKETS: self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf self.depth += 1 self.previous = leaf self.maybe_increment_lambda_arguments(leaf) self.maybe_increment_for_loop_variable(leaf) def any_open_brackets(self) -> bool: """Return True if there is an yet unmatched open bracket on the line.""" return bool(self.bracket_match) def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: """Return the highest priority of a delimiter found on the line. Values are consistent with what `is_split_*_delimiter()` return. Raises ValueError on no delimiters. """ return max(v for k, v in self.delimiters.items() if k not in exclude) def delimiter_count_with_priority(self, priority: Priority = 0) -> int: """Return the number of delimiters with the given `priority`. If no `priority` is passed, defaults to max priority on the line. """ if not self.delimiters: return 0 priority = priority or self.max_delimiter_priority() return sum(1 for p in self.delimiters.values() if p == priority) def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: """In a for loop, or comprehension, the variables are often unpacks. To avoid splitting on the comma in this situation, increase the depth of tokens between `for` and `in`. 
        """
        if leaf.type == token.NAME and leaf.value == "for":
            self.depth += 1
            self._for_loop_depths.append(self.depth)
            return True

        return False

    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
        """See `maybe_increment_for_loop_variable` above for explanation."""
        if (
            self._for_loop_depths
            and self._for_loop_depths[-1] == self.depth
            and leaf.type == token.NAME
            and leaf.value == "in"
        ):
            self.depth -= 1
            self._for_loop_depths.pop()
            return True

        return False

    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
        """In a lambda expression, there might be more than one argument.

        To avoid splitting on the comma in this situation, increase the depth of
        tokens between `lambda` and `:`.
        """
        if leaf.type == token.NAME and leaf.value == "lambda":
            self.depth += 1
            self._lambda_argument_depths.append(self.depth)
            return True

        return False

    def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
        """See `maybe_increment_lambda_arguments` above for explanation."""
        if (
            self._lambda_argument_depths
            and self._lambda_argument_depths[-1] == self.depth
            and leaf.type == token.COLON
        ):
            self.depth -= 1
            self._lambda_argument_depths.pop()
            return True

        return False

    def get_open_lsqb(self) -> Optional[Leaf]:
        """Return the most recent opening square bracket (if any)."""
        return self.bracket_match.get((self.depth - 1, token.RSQB))


@dataclass
class Line:
    """Holds leaves and comments. Can be printed with `str(line)`."""

    depth: int = 0
    leaves: List[Leaf] = field(default_factory=list)
    # keys ordered like `leaves`
    comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
    bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
    inside_brackets: bool = False
    should_explode: bool = False

    def append(self, leaf: Leaf, preformatted: bool = False) -> None:
        """Add a new `leaf` to the end of the line.

        Unless `preformatted` is True, the `leaf` will receive a new consistent
        whitespace prefix and metadata applied by :class:`BracketTracker`.

        Trailing commas are maybe removed, unpacked for loop variables are
        demoted from being delimiters.

        Inline comments are put aside.
        """
        has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
        if not has_value:
            return

        # Drop the "()" pair of an empty class parens list once the colon arrives.
        if token.COLON == leaf.type and self.is_class_paren_empty:
            del self.leaves[-2:]
        if self.leaves and not preformatted:
            # Note: at this point leaf.prefix should be empty except for
            # imports, for which we only preserve newlines.
            leaf.prefix += whitespace(
                leaf, complex_subscript=self.is_complex_subscript(leaf)
            )
        if self.inside_brackets or not preformatted:
            self.bracket_tracker.mark(leaf)
            self.maybe_remove_trailing_comma(leaf)
        if not self.append_comment(leaf):
            self.leaves.append(leaf)

    def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
        """Like :func:`append()` but disallow invalid standalone comment structure.

        Raises ValueError when any `leaf` is appended after a standalone comment
        or when a standalone comment is not the first leaf on the line.
        """
        if self.bracket_tracker.depth == 0:
            if self.is_comment:
                raise ValueError("cannot append to standalone comments")

            if self.leaves and leaf.type == STANDALONE_COMMENT:
                raise ValueError(
                    "cannot append standalone comments to a populated line"
                )

        self.append(leaf, preformatted=preformatted)

    @property
    def is_comment(self) -> bool:
        """Is this line a standalone comment?"""
        return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT

    @property
    def is_decorator(self) -> bool:
        """Is this line a decorator?"""
        return bool(self) and self.leaves[0].type == token.AT

    @property
    def is_import(self) -> bool:
        """Is this an import line?"""
        return bool(self) and is_import(self.leaves[0])

    @property
    def is_class(self) -> bool:
        """Is this line a class definition?"""
        return (
            bool(self)
            and self.leaves[0].type == token.NAME
            and self.leaves[0].value == "class"
        )

    @property
    def is_stub_class(self) -> bool:
        """Is this line a class definition with a body consisting only of "..."?"""
        # Three consecutive DOT leaves spell the "..." ellipsis body.
        return self.is_class and self.leaves[-3:] == [
            Leaf(token.DOT, ".") for _ in range(3)
        ]

    @property
    def is_collection_with_optional_trailing_comma(self) -> bool:
        """Is this line a collection literal with a trailing comma that's optional?

        Note that the trailing comma in a 1-tuple is not optional.
        """
        if not self.leaves or len(self.leaves) < 4:
            return False

        # Look for and address a trailing colon.
        if self.leaves[-1].type == token.COLON:
            closer = self.leaves[-2]
            close_index = -2
        else:
            closer = self.leaves[-1]
            close_index = -1
        if closer.type not in CLOSING_BRACKETS or self.inside_brackets:
            return False

        if closer.type == token.RPAR:
            # Tuples require an extra check, because if there's only
            # one element in the tuple removing the comma unmakes the
            # tuple.
            #
            # We also check for parens before looking for the trailing
            # comma because in some cases (eg assigning a dict
            # literal) the literal gets wrapped in temporary parens
            # during parsing. This case is covered by the
            # collections.py test data.
            opener = closer.opening_bracket
            for _open_index, leaf in enumerate(self.leaves):
                if leaf is opener:
                    break

            else:
                # Couldn't find the matching opening paren, play it safe.
                return False

            commas = 0
            comma_depth = self.leaves[close_index - 1].bracket_depth
            for leaf in self.leaves[_open_index + 1 : close_index]:
                if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA:
                    commas += 1
            if commas > 1:
                # We haven't looked yet for the trailing comma because
                # we might also have caught noop parens.
                return self.leaves[close_index - 1].type == token.COMMA

            elif commas == 1:
                return False  # it's either a one-tuple or didn't have a trailing comma

            if self.leaves[close_index - 1].type in CLOSING_BRACKETS:
                close_index -= 1
                closer = self.leaves[close_index]
                if closer.type == token.RPAR:
                    # TODO: this is a gut feeling. Will we ever see this?
                    return False

        if self.leaves[close_index - 1].type != token.COMMA:
            return False

        return True

    @property
    def is_def(self) -> bool:
        """Is this a function definition?
        (Also returns True for async defs.)"""
        try:
            first_leaf = self.leaves[0]
        except IndexError:
            return False

        try:
            second_leaf: Optional[Leaf] = self.leaves[1]
        except IndexError:
            second_leaf = None
        return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
            first_leaf.type == token.ASYNC
            and second_leaf is not None
            and second_leaf.type == token.NAME
            and second_leaf.value == "def"
        )

    @property
    def is_class_paren_empty(self) -> bool:
        """Is this a class with no base classes but using parentheses?

        Those are unnecessary and should be removed.
        """
        return (
            bool(self)
            and len(self.leaves) == 4
            and self.is_class
            and self.leaves[2].type == token.LPAR
            and self.leaves[2].value == "("
            and self.leaves[3].type == token.RPAR
            and self.leaves[3].value == ")"
        )

    @property
    def is_triple_quoted_string(self) -> bool:
        """Is the line a triple quoted string?"""
        return (
            bool(self)
            and self.leaves[0].type == token.STRING
            and self.leaves[0].value.startswith(('"""', "'''"))
        )

    def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
        """If so, needs to be split before emitting."""
        for leaf in self.leaves:
            if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
                return True

        return False

    def contains_uncollapsable_type_comments(self) -> bool:
        """Return True if the line holds a `# type:` comment that pins its position.

        Such a comment prevents collapsing the line, because moving it could
        associate it with a different argument or hide it behind another comment.
        """
        ignored_ids = set()
        try:
            last_leaf = self.leaves[-1]
            ignored_ids.add(id(last_leaf))
            if last_leaf.type == token.COMMA or (
                last_leaf.type == token.RPAR and not last_leaf.value
            ):
                # When trailing commas or optional parens are inserted by Black for
                # consistency, comments after the previous last element are not moved
                # (they don't have to, rendering will still be correct). So we ignore
                # trailing commas and invisible.
                last_leaf = self.leaves[-2]
                ignored_ids.add(id(last_leaf))
        except IndexError:
            return False

        # A type comment is uncollapsable if it is attached to a leaf
        # that isn't at the end of the line (since that could cause it
        # to get associated to a different argument) or if there are
        # comments before it (since that could cause it to get hidden
        # behind a comment).
        comment_seen = False
        for leaf_id, comments in self.comments.items():
            for comment in comments:
                if is_type_comment(comment):
                    if comment_seen or (
                        not is_type_comment(comment, " ignore")
                        and leaf_id not in ignored_ids
                    ):
                        return True

                comment_seen = True

        return False

    def contains_unsplittable_type_ignore(self) -> bool:
        """Return True if a trailing `# type: ignore` forbids splitting this line."""
        if not self.leaves:
            return False

        # If a 'type: ignore' is attached to the end of a line, we
        # can't split the line, because we can't know which of the
        # subexpressions the ignore was meant to apply to.
        #
        # We only want this to apply to actual physical lines from the
        # original source, though: we don't want the presence of a
        # 'type: ignore' at the end of a multiline expression to
        # justify pushing it all onto one line. Thus we
        # (unfortunately) need to check the actual source lines and
        # only report an unsplittable 'type: ignore' if this line was
        # one line in the original code.

        # Grab the first and last line numbers, skipping generated leaves
        first_line = next((l.lineno for l in self.leaves if l.lineno != 0), 0)
        last_line = next((l.lineno for l in reversed(self.leaves) if l.lineno != 0), 0)

        if first_line == last_line:
            # We look at the last two leaves since a comma or an
            # invisible paren could have been added at the end of the
            # line.
            for node in self.leaves[-2:]:
                for comment in self.comments.get(id(node), []):
                    if is_type_comment(comment, " ignore"):
                        return True

        return False

    def contains_multiline_strings(self) -> bool:
        """Return True if any leaf on this line is a multiline string."""
        for leaf in self.leaves:
            if is_multiline_string(leaf):
                return True

        return False

    def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
        """Remove trailing comma if there is one and it's safe."""
        if not (self.leaves and self.leaves[-1].type == token.COMMA):
            return False

        # We remove trailing commas only in the case of importing a
        # single name from a module.
        if not (
            self.leaves
            and self.is_import
            and len(self.leaves) > 4
            and self.leaves[-1].type == token.COMMA
            and closing.type in CLOSING_BRACKETS
            and self.leaves[-4].type == token.NAME
            and (
                # regular `from foo import bar,`
                self.leaves[-4].value == "import"
                # `from foo import (bar as baz,)`
                or (
                    len(self.leaves) > 6
                    and self.leaves[-6].value == "import"
                    and self.leaves[-3].value == "as"
                )
                # `from foo import bar as baz,`
                or (
                    len(self.leaves) > 5
                    and self.leaves[-5].value == "import"
                    and self.leaves[-3].value == "as"
                )
            )
            and closing.type == token.RPAR
        ):
            return False

        self.remove_trailing_comma()
        return True

    def append_comment(self, comment: Leaf) -> bool:
        """Add an inline or standalone comment to the line."""
        if (
            comment.type == STANDALONE_COMMENT
            and self.bracket_tracker.any_open_brackets()
        ):
            comment.prefix = ""
            return False

        if comment.type != token.COMMENT:
            return False

        if not self.leaves:
            comment.type = STANDALONE_COMMENT
            comment.prefix = ""
            return False

        last_leaf = self.leaves[-1]
        if (
            last_leaf.type == token.RPAR
            and not last_leaf.value
            and last_leaf.parent
            and len(list(last_leaf.parent.leaves())) <= 3
            and not is_type_comment(comment)
        ):
            # Comments on an optional parens wrapping a single leaf should belong to
            # the wrapped node except if it's a type comment. Pinning the comment like
            # this avoids unstable formatting caused by comment migration.
            if len(self.leaves) < 2:
                comment.type = STANDALONE_COMMENT
                comment.prefix = ""
                return False

            last_leaf = self.leaves[-2]

        self.comments.setdefault(id(last_leaf), []).append(comment)
        return True

    def comments_after(self, leaf: Leaf) -> List[Leaf]:
        """Generate comments that should appear directly after `leaf`."""
        return self.comments.get(id(leaf), [])

    def remove_trailing_comma(self) -> None:
        """Remove the trailing comma and moves the comments attached to it."""
        trailing_comma = self.leaves.pop()
        trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
        self.comments.setdefault(id(self.leaves[-1]), []).extend(
            trailing_comma_comments
        )

    def is_complex_subscript(self, leaf: Leaf) -> bool:
        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
        open_lsqb = self.bracket_tracker.get_open_lsqb()
        if open_lsqb is None:
            return False

        subscript_start = open_lsqb.next_sibling
        if isinstance(subscript_start, Node):
            if subscript_start.type == syms.listmaker:
                return False

            if subscript_start.type == syms.subscriptlist:
                subscript_start = child_towards(subscript_start, leaf)
        return subscript_start is not None and any(
            n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
        )

    def __str__(self) -> str:
        """Render the line."""
        if not self:
            return "\n"

        # Four spaces per depth level.
        indent = "    " * self.depth
        leaves = iter(self.leaves)
        first = next(leaves)
        res = f"{first.prefix}{indent}{first.value}"
        for leaf in leaves:
            res += str(leaf)
        for comment in itertools.chain.from_iterable(self.comments.values()):
            res += str(comment)
        return res + "\n"

    def __bool__(self) -> bool:
        """Return True if the line has leaves or comments."""
        return bool(self.leaves or self.comments)


@dataclass
class EmptyLineTracker:
    """Provides a stateful method that returns the number of potential extra
    empty lines needed before and after the currently processed line.

    Note: this tracker works on lines that haven't been split yet. It assumes
    the prefix of the first leaf consists of optional newlines.  Those newlines
    are consumed by `maybe_empty_lines()` and included in the computation.
    """

    is_pyi: bool = False
    previous_line: Optional[Line] = None
    previous_after: int = 0
    previous_defs: List[int] = field(default_factory=list)

    def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
        """Return the number of extra empty lines before and after the `current_line`.

        This is for separating `def`, `async def` and `class` with extra empty
        lines (two on module-level).
        """
        before, after = self._maybe_empty_lines(current_line)
        before = (
            # Black should not insert empty lines at the beginning
            # of the file
            0
            if self.previous_line is None
            else before - self.previous_after
        )
        self.previous_after = after
        self.previous_line = current_line
        return before, after

    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
        """Compute (before, after) blank-line counts, ignoring start-of-file rules."""
        max_allowed = 1
        if current_line.depth == 0:
            max_allowed = 1 if self.is_pyi else 2
        if current_line.leaves:
            # Consume the first leaf's extra newlines.
            first_leaf = current_line.leaves[0]
            before = first_leaf.prefix.count("\n")
            before = min(before, max_allowed)
            first_leaf.prefix = ""
        else:
            before = 0
        depth = current_line.depth
        while self.previous_defs and self.previous_defs[-1] >= depth:
            self.previous_defs.pop()
            if self.is_pyi:
                before = 0 if depth else 1
            else:
                before = 1 if depth else 2
        if current_line.is_decorator or current_line.is_def or current_line.is_class:
            return self._maybe_empty_lines_for_class_or_def(current_line, before)

        if (
            self.previous_line
            and self.previous_line.is_import
            and not current_line.is_import
            and depth == self.previous_line.depth
        ):
            return (before or 1), 0

        if (
            self.previous_line
            and self.previous_line.is_class
            and current_line.is_triple_quoted_string
        ):
            return before, 1

        return before, 0

    def _maybe_empty_lines_for_class_or_def(
        self, current_line: Line, before: int
    ) -> Tuple[int, int]:
        """Blank-line rules specific to decorators, `def`s and `class`es."""
        if not current_line.is_decorator:
            self.previous_defs.append(current_line.depth)
        if self.previous_line is None:
            # Don't insert empty lines before the first line in the file.
            return 0, 0

        if self.previous_line.is_decorator:
            return 0, 0

        if self.previous_line.depth < current_line.depth and (
            self.previous_line.is_class or self.previous_line.is_def
        ):
            return 0, 0

        if (
            self.previous_line.is_comment
            and self.previous_line.depth == current_line.depth
            and before == 0
        ):
            return 0, 0

        if self.is_pyi:
            if self.previous_line.depth > current_line.depth:
                newlines = 1
            elif current_line.is_class or self.previous_line.is_class:
                if current_line.is_stub_class and self.previous_line.is_stub_class:
                    # No blank line between classes with an empty body
                    newlines = 0
                else:
                    newlines = 1
            elif current_line.is_def and not self.previous_line.is_def:
                # Blank line between a block of functions and a block of non-functions
                newlines = 1
            else:
                newlines = 0
        else:
            newlines = 2
        if current_line.depth and newlines:
            newlines -= 1
        return newlines, 0


@dataclass
class LineGenerator(Visitor[Line]):
    """Generates reformatted Line objects.  Empty lines are not emitted.

    Note: destroys the tree it's visiting by mutating prefixes of its leaves
    in ways that will no longer stringify to valid Python code on the tree.
    """

    is_pyi: bool = False
    normalize_strings: bool = True
    current_line: Line = field(default_factory=Line)
    remove_u_prefix: bool = False

    def line(self, indent: int = 0) -> Iterator[Line]:
        """Generate a line.

        If the line is empty, only emit if it makes sense.
        If the line is too long, split it first and then generate.

        If any lines were generated, set up a new current_line.
        """
        if not self.current_line:
            self.current_line.depth += indent
            return  # Line is empty, don't emit. Creating a new one unnecessary.

        complete_line = self.current_line
        self.current_line = Line(depth=complete_line.depth + indent)
        yield complete_line

    def visit_default(self, node: LN) -> Iterator[Line]:
        """Default `visit_*()` implementation.
        Recurses to children of `node`."""
        if isinstance(node, Leaf):
            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
            for comment in generate_comments(node):
                if any_open_brackets:
                    # any comment within brackets is subject to splitting
                    self.current_line.append(comment)
                elif comment.type == token.COMMENT:
                    # regular trailing comment
                    self.current_line.append(comment)
                    yield from self.line()

                else:
                    # regular standalone comment
                    yield from self.line()

                    self.current_line.append(comment)
                    yield from self.line()

            normalize_prefix(node, inside_brackets=any_open_brackets)
            if self.normalize_strings and node.type == token.STRING:
                normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
                normalize_string_quotes(node)
            if node.type == token.NUMBER:
                normalize_numeric_literal(node)
            if node.type not in WHITESPACE:
                self.current_line.append(node)
        yield from super().visit_default(node)

    def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
        """Increase indentation level, maybe yield a line."""
        # In blib2to3 INDENT never holds comments.
        yield from self.line(+1)
        yield from self.visit_default(node)

    def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
        """Decrease indentation level, maybe yield a line."""
        # The current line might still wait for trailing comments.  At DEDENT time
        # there won't be any (they would be prefixes on the preceding NEWLINE).
        # Emit the line then.
        yield from self.line()

        # While DEDENT has no value, its prefix may contain standalone comments
        # that belong to the current indentation level.  Get 'em.
        yield from self.visit_default(node)

        # Finally, emit the dedent.
        yield from self.line(-1)

    def visit_stmt(
        self, node: Node, keywords: Set[str], parens: Set[str]
    ) -> Iterator[Line]:
        """Visit a statement.

        This implementation is shared for `if`, `while`, `for`, `try`, `except`,
        `def`, `with`, `class`, `assert` and assignments.

        The relevant Python language `keywords` for a given statement will be
        NAME leaves within it. This methods puts those on a separate line.

        `parens` holds a set of string leaf values immediately after which invisible
        parens should be put.
        """
        normalize_invisible_parens(node, parens_after=parens)
        for child in node.children:
            if child.type == token.NAME and child.value in keywords:  # type: ignore
                yield from self.line()

            yield from self.visit(child)

    def visit_suite(self, node: Node) -> Iterator[Line]:
        """Visit a suite."""
        if self.is_pyi and is_stub_suite(node):
            yield from self.visit(node.children[2])
        else:
            yield from self.visit_default(node)

    def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
        """Visit a statement without nested statements."""
        is_suite_like = node.parent and node.parent.type in STATEMENT
        if is_suite_like:
            if self.is_pyi and is_stub_body(node):
                yield from self.visit_default(node)
            else:
                yield from self.line(+1)
                yield from self.visit_default(node)
                yield from self.line(-1)

        else:
            if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
                yield from self.line()
            yield from self.visit_default(node)

    def visit_async_stmt(self, node: Node) -> Iterator[Line]:
        """Visit `async def`, `async for`, `async with`."""
        yield from self.line()

        children = iter(node.children)
        for child in children:
            yield from self.visit(child)

            if child.type == token.ASYNC:
                break

        internal_stmt = next(children)
        for child in internal_stmt.children:
            yield from self.visit(child)

    def visit_decorators(self, node: Node) -> Iterator[Line]:
        """Visit decorators."""
        for child in node.children:
            yield from self.line()
            yield from self.visit(child)

    def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
        """Remove a semicolon and put the other statement on a separate line."""
        yield from self.line()

    def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
        """End of file.  Process outstanding comments and end with a newline."""
        yield from self.visit_default(leaf)
        yield from self.line()

    def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
        """Emit the pending line first (when not inside brackets), then the comment."""
        if not self.current_line.bracket_tracker.any_open_brackets():
            yield from self.line()
        yield from self.visit_default(leaf)

    def visit_factor(self, node: Node) -> Iterator[Line]:
        """Force parentheses between a unary op and a binary power:

        -2 ** 8 -> -(2 ** 8)
        """
        _operator, operand = node.children
        if (
            operand.type == syms.power
            and len(operand.children) == 3
            and operand.children[1].type == token.DOUBLESTAR
        ):
            lpar = Leaf(token.LPAR, "(")
            rpar = Leaf(token.RPAR, ")")
            index = operand.remove() or 0
            node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
        yield from self.visit_default(node)

    def __post_init__(self) -> None:
        """You are in a twisty little maze of passages."""
        v = self.visit_stmt
        # Ø is just an empty set — shorthand for "no keywords" / "no parens".
        Ø: Set[str] = set()
        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
        self.visit_if_stmt = partial(
            v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
        )
        self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
        self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
        self.visit_try_stmt = partial(
            v, keywords={"try", "except", "else", "finally"}, parens=Ø
        )
        self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
        self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
        self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
        self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
        self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
        self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
        self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
        self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
        self.visit_async_funcdef = self.visit_async_stmt
        self.visit_decorated = self.visit_decorators


IMPLICIT_TUPLE = {syms.testlist,
                  syms.testlist_star_expr, syms.exprlist}
# Maps each opening bracket token to its matching closing token.
BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
OPENING_BRACKETS = set(BRACKET.keys())
CLOSING_BRACKETS = set(BRACKET.values())
BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}


def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str:  # noqa: C901
    """Return whitespace prefix if needed for the given `leaf`. (mostly trivia)

    `complex_subscript` signals whether the given leaf is part of a subscription
    which has non-trivial arguments, like arithmetic expressions or function calls.
    """
    NO = ""
    SPACE = " "
    # Two spaces precede an inline comment, per PEP 8.
    DOUBLESPACE = "  "
    t = leaf.type
    p = leaf.parent
    v = leaf.value
    if t in ALWAYS_NO_SPACE:
        return NO

    if t == token.COMMENT:
        return DOUBLESPACE

    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
    if t == token.COLON and p.type not in {
        syms.subscript,
        syms.subscriptlist,
        syms.sliceop,
    }:
        return NO

    prev = leaf.prev_sibling
    if not prev:
        prevp = preceding_leaf(p)
        if not prevp or prevp.type in OPENING_BRACKETS:
            return NO

        if t == token.COLON:
            if prevp.type == token.COLON:
                return NO

            elif prevp.type != token.COMMA and not complex_subscript:
                return NO

            return SPACE

        if prevp.type == token.EQUAL:
            if prevp.parent:
                if prevp.parent.type in {
                    syms.arglist,
                    syms.argument,
                    syms.parameters,
                    syms.varargslist,
                }:
                    return NO

                elif prevp.parent.type == syms.typedargslist:
                    # A bit hacky: if the equal sign has whitespace, it means we
                    # previously found it's a typed argument.  So, we're using
                    # that, too.
                    return prevp.prefix

        elif prevp.type in VARARGS_SPECIALS:
            if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
                return NO

        elif prevp.type == token.COLON:
            if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
                return SPACE if complex_subscript else NO

        elif (
            prevp.parent
            and prevp.parent.type == syms.factor
            and prevp.type in MATH_OPERATORS
        ):
            return NO

        elif (
            prevp.type == token.RIGHTSHIFT
            and prevp.parent
            and prevp.parent.type == syms.shift_expr
            and prevp.prev_sibling
            and prevp.prev_sibling.type == token.NAME
            and prevp.prev_sibling.value == "print"  # type: ignore
        ):
            # Python 2 print chevron
            return NO

    elif prev.type in OPENING_BRACKETS:
        return NO

    if p.type in {syms.parameters, syms.arglist}:
        # untyped function signatures or calls
        if not prev or prev.type != token.COMMA:
            return NO

    elif p.type == syms.varargslist:
        # lambdas
        if prev and prev.type != token.COMMA:
            return NO

    elif p.type == syms.typedargslist:
        # typed function signatures
        if not prev:
            return NO

        if t == token.EQUAL:
            if prev.type != syms.tname:
                return NO

        elif prev.type == token.EQUAL:
            # A bit hacky: if the equal sign has whitespace, it means we
            # previously found it's a typed argument.  So, we're using that, too.
            return prev.prefix

        elif prev.type != token.COMMA:
            return NO

    elif p.type == syms.tname:
        # type names
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type != token.COMMA:
                return NO

    elif p.type == syms.trailer:
        # attributes and calls
        if t == token.LPAR or t == token.RPAR:
            return NO

        if not prev:
            if t == token.DOT:
                prevp = preceding_leaf(p)
                if not prevp or prevp.type != token.NUMBER:
                    return NO

            elif t == token.LSQB:
                return NO

        elif prev.type != token.COMMA:
            return NO

    elif p.type == syms.argument:
        # single argument
        if t == token.EQUAL:
            return NO

        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type == token.LPAR:
                return NO

        elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
            return NO

    elif p.type == syms.decorator:
        # decorators
        return NO

    elif p.type == syms.dotted_name:
        if prev:
            return NO

        prevp = preceding_leaf(p)
        if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
            return NO

    elif p.type == syms.classdef:
        if t == token.LPAR:
            return NO

        if prev and prev.type == token.LPAR:
            return NO

    elif p.type in {syms.subscript, syms.sliceop}:
        # indexing
        if not prev:
            assert p.parent is not None, "subscripts are always parented"
            if p.parent.type == syms.subscriptlist:
                return SPACE

            return NO

        elif not complex_subscript:
            return NO

    elif p.type == syms.atom:
        if prev and t == token.DOT:
            # dots, but not the first one.
            return NO

    elif p.type == syms.dictsetmaker:
        # dict unpacking
        if prev and prev.type == token.DOUBLESTAR:
            return NO

    elif p.type in {syms.factor, syms.star_expr}:
        # unary ops
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type in OPENING_BRACKETS:
                return NO

            prevp_parent = prevp.parent
            assert prevp_parent is not None
            if prevp.type == token.COLON and prevp_parent.type in {
                syms.subscript,
                syms.sliceop,
            }:
                return NO

            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
                return NO

        elif t in {token.NAME, token.NUMBER, token.STRING}:
            return NO

    elif p.type == syms.import_from:
        if t == token.DOT:
            if prev and prev.type == token.DOT:
                return NO

        elif t == token.NAME:
            if v == "import":
                return SPACE

            if prev and prev.type == token.DOT:
                return NO

    elif p.type == syms.sliceop:
        return NO

    return SPACE


def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
    """Return the first leaf that precedes `node`, if any."""
    while node:
        res = node.prev_sibling
        if res:
            if isinstance(res, Leaf):
                return res

            try:
                # The previous sibling is an inner Node; its last leaf precedes us.
                return list(res.leaves())[-1]

            except IndexError:
                return None

        node = node.parent
    return None


def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
    """Return the child of `ancestor` that contains `descendant`."""
    node: Optional[LN] = descendant
    while node and node.parent != ancestor:
        node = node.parent
    return node


def container_of(leaf: Leaf) -> LN:
    """Return `leaf` or one of its ancestors that is the topmost container of it.

    By "container" we mean a node where `leaf` is the very first child.
    """
    same_prefix = leaf.prefix
    container: LN = leaf
    while container:
        parent = container.parent
        if parent is None:
            break

        # `leaf` must remain the first child of each candidate container;
        # a differing prefix on the first child means it no longer is.
        if parent.children[0].prefix != same_prefix:
            break

        if parent.type == syms.file_input:
            break

        if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
            break

        container = parent
    return container


def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
    """Return the priority of the `leaf` delimiter, given a line break after it.

    The delimiter priorities returned here are from those delimiters that would
    cause a line break after themselves.

    Higher numbers are higher priority.
    """
    if leaf.type == token.COMMA:
        return COMMA_PRIORITY

    return 0


def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
    """Return the priority of the `leaf` delimiter, given a line break before it.

    The delimiter priorities returned here are from those delimiters that would
    cause a line break before themselves.

    Higher numbers are higher priority.
    """
    if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
        # * and ** might also be MATH_OPERATORS but in this case they are not.
        # Don't treat them as a delimiter.
        return 0

    if (
        leaf.type == token.DOT
        and leaf.parent
        and leaf.parent.type not in {syms.import_from, syms.dotted_name}
        and (previous is None or previous.type in CLOSING_BRACKETS)
    ):
        return DOT_PRIORITY

    if (
        leaf.type in MATH_OPERATORS
        and leaf.parent
        and leaf.parent.type not in {syms.factor, syms.star_expr}
    ):
        return MATH_PRIORITIES[leaf.type]

    if leaf.type in COMPARATORS:
        return COMPARATOR_PRIORITY

    if (
        leaf.type == token.STRING
        and previous is not None
        and previous.type == token.STRING
    ):
        return STRING_PRIORITY

    if leaf.type not in {token.NAME, token.ASYNC}:
        return 0

    if (
        leaf.value == "for"
        and leaf.parent
        and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
        or leaf.type == token.ASYNC
    ):
        if (
            not isinstance(leaf.prev_sibling, Leaf)
            or leaf.prev_sibling.value != "async"
        ):
            return COMPREHENSION_PRIORITY

    if (
        leaf.value == "if"
        and leaf.parent
        and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
    ):
        return COMPREHENSION_PRIORITY

    if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
        return TERNARY_PRIORITY

    if leaf.value == "is":
        return COMPARATOR_PRIORITY

    if (
        leaf.value == "in"
        and leaf.parent
        and leaf.parent.type in {syms.comp_op, syms.comparison}
        and not (
            previous is not None
            and previous.type == token.NAME
            and previous.value == "not"
        )
    ):
        return COMPARATOR_PRIORITY

    if (
        leaf.value == "not"
        and leaf.parent
        and leaf.parent.type == syms.comp_op
        and not (
            previous is not None
            and previous.type == token.NAME
            and previous.value == "is"
        )
    ):
        return COMPARATOR_PRIORITY

    if leaf.value in LOGIC_OPERATORS and leaf.parent:
        return LOGIC_PRIORITY

    return 0


FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}


def generate_comments(leaf: LN) -> Iterator[Leaf]:
    """Clean the prefix of the `leaf` and generate comments from it, if any.

    Comments in lib2to3 are shoved into the whitespace prefix.  This happens
    in `pgen2/driver.py:Driver.parse_tokens()`.  This was a brilliant
    implementation move because it does away with modifying the grammar to
    include all the possible places in which comments can be placed.

    The sad consequence for us though is that comments don't "belong" anywhere.
    This is why this function generates simple parentless Leaf objects for
    comments.  We simply don't know what the correct parent should be.

    No matter though, we can live without this.  We really only need to
    differentiate between inline and standalone comments.  The latter don't
    share the line with any code.

    Inline comments are emitted as regular token.COMMENT leaves.  Standalone
    are emitted with a fake STANDALONE_COMMENT token identifier.
    """
    for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
        yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)


@dataclass
class ProtoComment:
    """Describes a piece of syntax that is a comment.

    It's not a :class:`blib2to3.pytree.Leaf` so that:

    * it can be cached (`Leaf` objects should not be reused more than once as
      they store their lineno, column, prefix, and parent information);
    * `newlines` and `consumed` fields are kept separate from the `value`. This
      simplifies handling of special marker comments like ``# fmt: off/on``.
""" type: int # token.COMMENT or STANDALONE_COMMENT value: str # content of the comment newlines: int # how many newlines before the comment consumed: int # how many characters of the original leaf's prefix did we consume @lru_cache(maxsize=4096) def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" result: List[ProtoComment] = [] if not prefix or "#" not in prefix: return result consumed = 0 nlines = 0 ignored_lines = 0 for index, line in enumerate(prefix.split("\n")): consumed += len(line) + 1 # adding the length of the split '\n' line = line.lstrip() if not line: nlines += 1 if not line.startswith("#"): # Escaped newlines outside of a comment are not really newlines at # all. We treat a single-line comment following an escaped newline # as a simple trailing comment. if line.endswith("\\"): ignored_lines += 1 continue if index == ignored_lines and not is_endmarker: comment_type = token.COMMENT # simple trailing comment else: comment_type = STANDALONE_COMMENT comment = make_comment(line) result.append( ProtoComment( type=comment_type, value=comment, newlines=nlines, consumed=consumed ) ) nlines = 0 return result def make_comment(content: str) -> str: """Return a consistently formatted comment from the given `content` string. All comments (except for "##", "#!", "#:", '#'", "#%%") should have a single space between the hash sign and the content. If `content` didn't start with a hash sign, one is provided. """ content = content.rstrip() if not content: return "#" if content[0] == "#": content = content[1:] if content and content[0] not in " !:#'%": content = " " + content return "#" + content def split_line( line: Line, line_length: int, inner: bool = False, features: Collection[Feature] = (), ) -> Iterator[Line]: """Split a `line` into potentially many lines. They should fit in the allotted `line_length` but might not be able to. 
`inner` signifies that there were a pair of brackets somewhere around the current `line`, possibly transitively. This means we can fallback to splitting by delimiters if the LHS/RHS don't yield any results. `features` are syntactical features that may be used in the output. """ if line.is_comment: yield line return line_str = str(line).strip("\n") if ( not line.contains_uncollapsable_type_comments() and not line.should_explode and not line.is_collection_with_optional_trailing_comma and ( is_line_short_enough(line, line_length=line_length, line_str=line_str) or line.contains_unsplittable_type_ignore() ) ): yield line return split_funcs: List[SplitFunc] if line.is_def: split_funcs = [left_hand_split] else: def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: for omit in generate_trailers_to_omit(line, line_length): lines = list(right_hand_split(line, line_length, features, omit=omit)) if is_line_short_enough(lines[0], line_length=line_length): yield from lines return # All splits failed, best effort split with no omits. # This mostly happens to multiline strings that are by definition # reported as not fitting a single line. # line_length=1 here was historically a bug that somehow became a feature. # See #762 and #781 for the full story. yield from right_hand_split(line, line_length=1, features=features) if line.inside_brackets: split_funcs = [delimiter_split, standalone_comment_split, rhs] else: split_funcs = [rhs] for split_func in split_funcs: # We are accumulating lines in `result` because we might want to abort # mission and return the original line in the end, or attempt a different # split altogether. 
def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
    """Split line into many lines, starting with the first matching bracket pair.

    Note: this usually looks weird, only use this for function definitions.
    Prefer RHS otherwise.  This is why this function is not symmetrical with
    :func:`right_hand_split` which also handles optional parentheses.
    """
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    current_leaves = head_leaves
    matching_bracket: Optional[Leaf] = None
    # Route each leaf into head / body / tail relative to the first top-level
    # bracket pair encountered while scanning left to right.
    for leaf in line.leaves:
        if (
            current_leaves is body_leaves
            and leaf.type in CLOSING_BRACKETS
            and leaf.opening_bracket is matching_bracket
        ):
            # The bracket we opened on has just closed; everything after it
            # belongs to the tail (or back to head when the body is empty).
            current_leaves = tail_leaves if body_leaves else head_leaves
        current_leaves.append(leaf)
        if current_leaves is head_leaves:
            if leaf.type in OPENING_BRACKETS:
                matching_bracket = leaf
                current_leaves = body_leaves
    if not matching_bracket:
        raise CannotSplit("No brackets found")

    head = bracket_split_build_line(head_leaves, line, matching_bracket)
    body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
    tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
    bracket_split_succeeded_or_raise(head, body, tail)
    for result in (head, body, tail):
        if result:
            yield result
def right_hand_split(
    line: Line,
    line_length: int,
    features: Collection[Feature] = (),
    omit: Collection[LeafID] = (),
) -> Iterator[Line]:
    """Split line into many lines, starting with the last matching bracket pair.

    If the split was by optional parentheses, attempt splitting without them, too.
    `omit` is a collection of closing bracket IDs that shouldn't be considered for
    this split.

    Note: running this function modifies `bracket_depth` on the leaves of `line`.
    """
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    current_leaves = tail_leaves
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
    # Scan right to left, routing leaves into tail / body / head relative to the
    # last (non-omitted) closing bracket.
    for leaf in reversed(line.leaves):
        if current_leaves is body_leaves:
            if leaf is opening_bracket:
                current_leaves = head_leaves if body_leaves else tail_leaves
        current_leaves.append(leaf)
        if current_leaves is tail_leaves:
            if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
                opening_bracket = leaf.opening_bracket
                closing_bracket = leaf
                current_leaves = body_leaves
    if not (opening_bracket and closing_bracket and head_leaves):
        # If there is no opening or closing_bracket that means the split failed and
        # all content is in the tail.  Otherwise, if `head_leaves` are empty, it means
        # the matching `opening_bracket` wasn't available on `line` anymore.
        raise CannotSplit("No brackets found")

    # The three lists were accumulated right-to-left; restore source order.
    tail_leaves.reverse()
    body_leaves.reverse()
    head_leaves.reverse()
    head = bracket_split_build_line(head_leaves, line, opening_bracket)
    body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
    tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
    bracket_split_succeeded_or_raise(head, body, tail)
    if (
        # the body shouldn't be exploded
        not body.should_explode
        # the opening bracket is an optional paren
        and opening_bracket.type == token.LPAR
        and not opening_bracket.value
        # the closing bracket is an optional paren
        and closing_bracket.type == token.RPAR
        and not closing_bracket.value
        # it's not an import (optional parens are the only thing we can split on
        # in this case; attempting a split without them is a waste of time)
        and not line.is_import
        # there are no standalone comments in the body
        and not body.contains_standalone_comments(0)
        # and we can actually remove the parens
        and can_omit_invisible_parens(body, line_length)
    ):
        # Retry the split with this optional paren pair omitted as well.
        omit = {id(closing_bracket), *omit}
        try:
            yield from right_hand_split(line, line_length, features=features, omit=omit)
            return

        except CannotSplit:
            if not (
                can_be_split(body)
                or is_line_short_enough(body, line_length=line_length)
            ):
                raise CannotSplit(
                    "Splitting failed, body is still too long and can't be split."
                )

            elif head.contains_multiline_strings() or tail.contains_multiline_strings():
                raise CannotSplit(
                    "The current optional pair of parentheses is bound to fail to "
                    "satisfy the splitting algorithm because the head or the tail "
                    "contains multiline strings which by definition never fit one "
                    "line."
                )

    # Keeping the optional parens: make sure they are rendered.
    ensure_visible(opening_bracket)
    ensure_visible(closing_bracket)
    for result in (head, body, tail):
        if result:
            yield result
right_hand_split(line, line_length, features=features, omit=omit) return except CannotSplit: if not ( can_be_split(body) or is_line_short_enough(body, line_length=line_length) ): raise CannotSplit( "Splitting failed, body is still too long and can't be split." ) elif head.contains_multiline_strings() or tail.contains_multiline_strings(): raise CannotSplit( "The current optional pair of parentheses is bound to fail to " "satisfy the splitting algorithm because the head or the tail " "contains multiline strings which by definition never fit one " "line." ) ensure_visible(opening_bracket) ensure_visible(closing_bracket) for result in (head, body, tail): if result: yield result def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. Do nothing otherwise. A left- or right-hand split is based on a pair of brackets. Content before (and including) the opening bracket is left on one line, content inside the brackets is put on a separate line, and finally content starting with and following the closing bracket is put on a separate line. Those are called `head`, `body`, and `tail`, respectively. If the split produced the same line (all content in `head`) or ended up with an empty `body` and the `tail` is just the closing bracket, then it's considered failed. """ tail_len = len(str(tail).strip()) if not body: if tail_len == 0: raise CannotSplit("Splitting brackets produced the same line") elif tail_len < 3: raise CannotSplit( f"Splitting brackets on an empty body to save " f"{tail_len} characters is not worth it" ) def bracket_split_build_line( leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False ) -> Line: """Return a new line with given `leaves` and respective comments from `original`. If `is_body` is True, the result line is one-indented inside brackets and as such has its first leaf's prefix normalized and a trailing comma added when expected. 
""" result = Line(depth=original.depth) if is_body: result.inside_brackets = True result.depth += 1 if leaves: # Since body is a new indent level, remove spurious leading whitespace. normalize_prefix(leaves[0], inside_brackets=True) # Ensure a trailing comma for imports and standalone function arguments, but # be careful not to add one after any comments or within type annotations. no_commas = ( original.is_def and opening_bracket.value == "(" and not any(l.type == token.COMMA for l in leaves) ) if original.is_import or no_commas: for i in range(len(leaves) - 1, -1, -1): if leaves[i].type == STANDALONE_COMMENT: continue if leaves[i].type != token.COMMA: leaves.insert(i + 1, Leaf(token.COMMA, ",")) break # Populate the line for leaf in leaves: result.append(leaf, preformatted=True) for comment_after in original.comments_after(leaf): result.append(comment_after, preformatted=True) if is_body: result.should_explode = should_explode(result, opening_bracket) return result def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc: """Normalize prefix of the first leaf in every line returned by `split_func`. This is a decorator over relevant split functions. """ @wraps(split_func) def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: for l in split_func(line, features): normalize_prefix(l.leaves[0], inside_brackets=True) yield l return split_wrapper @dont_increase_indentation def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: """Split according to delimiters of the highest priority. If the appropriate Features are given, the split will add trailing commas also in function signatures and calls that contain `*` and `**`. 
""" try: last_leaf = line.leaves[-1] except IndexError: raise CannotSplit("Line empty") bt = line.bracket_tracker try: delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) except ValueError: raise CannotSplit("No delimiters found") if delimiter_priority == DOT_PRIORITY: if bt.delimiter_count_with_priority(delimiter_priority) == 1: raise CannotSplit("Splitting a single attribute from its owner looks wrong") current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) lowest_depth = sys.maxsize trailing_comma_safe = True def append_to_line(leaf: Leaf) -> Iterator[Line]: """Append `leaf` to current line or to new line if appending impossible.""" nonlocal current_line try: current_line.append_safe(leaf, preformatted=True) except ValueError: yield current_line current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) current_line.append(leaf) for leaf in line.leaves: yield from append_to_line(leaf) for comment_after in line.comments_after(leaf): yield from append_to_line(comment_after) lowest_depth = min(lowest_depth, leaf.bracket_depth) if leaf.bracket_depth == lowest_depth: if is_vararg(leaf, within={syms.typedargslist}): trailing_comma_safe = ( trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features ) elif is_vararg(leaf, within={syms.arglist, syms.argument}): trailing_comma_safe = ( trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features ) leaf_priority = bt.delimiters.get(id(leaf)) if leaf_priority == delimiter_priority: yield current_line current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) if current_line: if ( trailing_comma_safe and delimiter_priority == COMMA_PRIORITY and current_line.leaves[-1].type != token.COMMA and current_line.leaves[-1].type != STANDALONE_COMMENT ): current_line.append(Leaf(token.COMMA, ",")) yield current_line @dont_increase_indentation def standalone_comment_split( line: Line, features: Collection[Feature] = () ) -> Iterator[Line]: """Split 
@dont_increase_indentation
def standalone_comment_split(
    line: Line, features: Collection[Feature] = ()
) -> Iterator[Line]:
    """Split standalone comments from the rest of the line."""
    if not line.contains_standalone_comments(0):
        raise CannotSplit("Line does not have any standalone comments")

    current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)

    def append_to_line(leaf: Leaf) -> Iterator[Line]:
        """Append `leaf` to current line or to new line if appending impossible."""
        nonlocal current_line
        try:
            # `append_safe` raises ValueError when `leaf` cannot share a line
            # with what is already accumulated (e.g. a standalone comment).
            current_line.append_safe(leaf, preformatted=True)
        except ValueError:
            yield current_line
            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
            current_line.append(leaf)

    for leaf in line.leaves:
        yield from append_to_line(leaf)

        for comment_after in line.comments_after(leaf):
            yield from append_to_line(comment_after)

    # Flush whatever leaves remain after the last forced break.
    if current_line:
        yield current_line
""" match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL) assert match is not None, f"failed to match string {leaf.value!r}" orig_prefix = match.group(1) new_prefix = orig_prefix.lower() if remove_u_prefix: new_prefix = new_prefix.replace("u", "") leaf.value = f"{new_prefix}{match.group(2)}" def normalize_string_quotes(leaf: Leaf) -> None: """Prefer double quotes but only if it doesn't cause more escaping. Adds or removes backslashes as appropriate. Doesn't parse and fix strings nested in f-strings (yet). Note: Mutates its argument. """ value = leaf.value.lstrip("furbFURB") if value[:3] == '"""': return elif value[:3] == "'''": orig_quote = "'''" new_quote = '"""' elif value[0] == '"': orig_quote = '"' new_quote = "'" else: orig_quote = "'" new_quote = '"' first_quote_pos = leaf.value.find(orig_quote) if first_quote_pos == -1: return # There's an internal error prefix = leaf.value[:first_quote_pos] unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)] if "r" in prefix.casefold(): if unescaped_new_quote.search(body): # There's at least one unescaped new_quote in this raw string # so converting is impossible return # Do not introduce or remove backslashes in raw strings new_body = body else: # remove unnecessary escapes new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) if body != new_body: # Consider the string without unnecessary escapes as the original body = new_body leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}" new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) if "f" in prefix.casefold(): matches = re.findall( r""" (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { ([^{].*?) 
def format_float_or_int_string(text: str) -> str:
    """Format a float string like "1.0".

    A missing integer or fractional part is padded with a zero
    (".5" -> "0.5", "1." -> "1.0"); integer literals pass through unchanged.
    """
    if "." not in text:
        return text

    integer_part, _, fractional_part = text.partition(".")
    if not integer_part:
        integer_part = "0"
    if not fractional_part:
        fractional_part = "0"
    return integer_part + "." + fractional_part
def normalize_fmt_off(node: Node) -> None:
    """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.

    One pair is collapsed per pass; loop until no pair remains.
    """
    # `convert_one_fmt_off_pair` returns True while it keeps finding (and
    # converting) pairs, so simply iterate it to exhaustion.
    while convert_one_fmt_off_pair(node):
        pass
""" for leaf in node.leaves(): previous_consumed = 0 for comment in list_comments(leaf.prefix, is_endmarker=False): if comment.value in FMT_OFF: # We only want standalone comments. If there's no previous leaf or # the previous leaf is indentation, it's a standalone comment in # disguise. if comment.type != STANDALONE_COMMENT: prev = preceding_leaf(leaf) if prev and prev.type not in WHITESPACE: continue ignored_nodes = list(generate_ignored_nodes(leaf)) if not ignored_nodes: continue first = ignored_nodes[0] # Can be a container node with the `leaf`. parent = first.parent prefix = first.prefix first.prefix = prefix[comment.consumed :] hidden_value = ( comment.value + "\n" + "".join(str(n) for n in ignored_nodes) ) if hidden_value.endswith("\n"): # That happens when one of the `ignored_nodes` ended with a NEWLINE # leaf (possibly followed by a DEDENT). hidden_value = hidden_value[:-1] first_idx: Optional[int] = None for ignored in ignored_nodes: index = ignored.remove() if first_idx is None: first_idx = index assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" parent.insert_child( first_idx, Leaf( STANDALONE_COMMENT, hidden_value, prefix=prefix[:previous_consumed] + "\n" * comment.newlines, ), ) return True previous_consumed = comment.consumed return False def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]: """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. Stops at the end of the block. 
""" container: Optional[LN] = container_of(leaf) while container is not None and container.type != token.ENDMARKER: is_fmt_on = False for comment in list_comments(container.prefix, is_endmarker=False): if comment.value in FMT_ON: is_fmt_on = True elif comment.value in FMT_OFF: is_fmt_on = False if is_fmt_on: return yield container container = container.next_sibling def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: """If it's safe, make the parens in the atom `node` invisible, recursively. Additionally, remove repeated, adjacent invisible parens from the atom `node` as they are redundant. Returns whether the node should itself be wrapped in invisible parentheses. """ if ( node.type != syms.atom or is_empty_tuple(node) or is_one_tuple(node) or (is_yield(node) and parent.type != syms.expr_stmt) or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY ): return False first = node.children[0] last = node.children[-1] if first.type == token.LPAR and last.type == token.RPAR: middle = node.children[1] # make parentheses invisible first.value = "" # type: ignore last.value = "" # type: ignore maybe_make_parens_invisible_in_atom(middle, parent=parent) if is_atom_with_invisible_parens(middle): # Strip the invisible parens from `middle` by replacing # it with the child in-between the invisible parens middle.replace(middle.children[1]) return False return True def is_atom_with_invisible_parens(node: LN) -> bool: """Given a `LN`, determines whether it's an atom `node` with invisible parens. Useful in dedupe-ing and normalizing parens. 
""" if isinstance(node, Leaf) or node.type != syms.atom: return False first, last = node.children[0], node.children[-1] return ( isinstance(first, Leaf) and first.type == token.LPAR and first.value == "" and isinstance(last, Leaf) and last.type == token.RPAR and last.value == "" ) def is_empty_tuple(node: LN) -> bool: """Return True if `node` holds an empty tuple.""" return ( node.type == syms.atom and len(node.children) == 2 and node.children[0].type == token.LPAR and node.children[1].type == token.RPAR ) def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: """Returns `wrapped` if `node` is of the shape ( wrapped ). Parenthesis can be optional. Returns None otherwise""" if len(node.children) != 3: return None lpar, wrapped, rpar = node.children if not (lpar.type == token.LPAR and rpar.type == token.RPAR): return None return wrapped def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: """Wrap `child` in parentheses. This replaces `child` with an atom holding the parentheses and the old child. That requires moving the prefix. If `visible` is False, the leaves will be valueless (and thus invisible). 
""" lpar = Leaf(token.LPAR, "(" if visible else "") rpar = Leaf(token.RPAR, ")" if visible else "") prefix = child.prefix child.prefix = "" index = child.remove() or 0 new_child = Node(syms.atom, [lpar, child, rpar]) new_child.prefix = prefix parent.insert_child(index, new_child) def is_one_tuple(node: LN) -> bool: """Return True if `node` holds a tuple with one element, with or without parens.""" if node.type == syms.atom: gexp = unwrap_singleton_parenthesis(node) if gexp is None or gexp.type != syms.testlist_gexp: return False return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA return ( node.type in IMPLICIT_TUPLE and len(node.children) == 2 and node.children[1].type == token.COMMA ) def is_walrus_assignment(node: LN) -> bool: """Return True iff `node` is of the shape ( test := test )""" inner = unwrap_singleton_parenthesis(node) return inner is not None and inner.type == syms.namedexpr_test def is_yield(node: LN) -> bool: """Return True if `node` holds a `yield` or `yield from` expression.""" if node.type == syms.yield_expr: return True if node.type == token.NAME and node.value == "yield": # type: ignore return True if node.type != syms.atom: return False if len(node.children) != 3: return False lpar, expr, rpar = node.children if lpar.type == token.LPAR and rpar.type == token.RPAR: return is_yield(expr) return False def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: """Return True if `leaf` is a star or double star in a vararg or kwarg. If `within` includes VARARGS_PARENTS, this applies to function signatures. If `within` includes UNPACKING_PARENTS, it applies to right hand-side extended iterable unpacking (PEP 3132) and additional unpacking generalizations (PEP 448). """ if leaf.type not in VARARGS_SPECIALS or not leaf.parent: return False p = leaf.parent if p.type == syms.star_expr: # Star expressions are also used as assignment targets in extended # iterable unpacking (PEP 3132). See what its parent is instead. 
if not p.parent: return False p = p.parent return p.type in within def is_multiline_string(leaf: Leaf) -> bool: """Return True if `leaf` is a multiline string that actually spans many lines.""" value = leaf.value.lstrip("furbFURB") return value[:3] in {'"""', "'''"} and "\n" in value def is_stub_suite(node: Node) -> bool: """Return True if `node` is a suite with a stub body.""" if ( len(node.children) != 4 or node.children[0].type != token.NEWLINE or node.children[1].type != token.INDENT or node.children[3].type != token.DEDENT ): return False return is_stub_body(node.children[2]) def is_stub_body(node: LN) -> bool: """Return True if `node` is a simple statement containing an ellipsis.""" if not isinstance(node, Node) or node.type != syms.simple_stmt: return False if len(node.children) != 2: return False child = node.children[0] return ( child.type == syms.atom and len(child.children) == 3 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) ) def max_delimiter_priority_in_atom(node: LN) -> Priority: """Return maximum delimiter priority inside `node`. This is specific to atoms with contents contained in a pair of parentheses. If `node` isn't an atom or there are no enclosing parentheses, returns 0. """ if node.type != syms.atom: return 0 first = node.children[0] last = node.children[-1] if not (first.type == token.LPAR and last.type == token.RPAR): return 0 bt = BracketTracker() for c in node.children[1:-1]: if isinstance(c, Leaf): bt.mark(c) else: for leaf in c.leaves(): bt.mark(leaf) try: return bt.max_delimiter_priority() except ValueError: return 0 def ensure_visible(leaf: Leaf) -> None: """Make sure parentheses are visible. They could be invisible as part of some statements (see :func:`normalize_invisible_parens` and :func:`visit_import_from`). 
""" if leaf.type == token.LPAR: leaf.value = "(" elif leaf.type == token.RPAR: leaf.value = ")" def should_explode(line: Line, opening_bracket: Leaf) -> bool: """Should `line` immediately be split with `delimiter_split()` after RHS?""" if not ( opening_bracket.parent and opening_bracket.parent.type in {syms.atom, syms.import_from} and opening_bracket.value in "[{(" ): return False try: last_leaf = line.leaves[-1] exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set() max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) except (IndexError, ValueError): return False return max_priority == COMMA_PRIORITY def get_features_used(node: Node) -> Set[Feature]: """Return a set of (relatively) new Python features used in this file. Currently looking for: - f-strings; - underscores in numeric literals; - trailing commas after * or ** in function signatures and calls; - positional only arguments in function signatures and lambdas; """ features: Set[Feature] = set() for n in node.pre_order(): if n.type == token.STRING: value_head = n.value[:2] # type: ignore if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}: features.add(Feature.F_STRINGS) elif n.type == token.NUMBER: if "_" in n.value: # type: ignore features.add(Feature.NUMERIC_UNDERSCORES) elif n.type == token.SLASH: if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}: features.add(Feature.POS_ONLY_ARGUMENTS) elif n.type == token.COLONEQUAL: features.add(Feature.ASSIGNMENT_EXPRESSIONS) elif ( n.type in {syms.typedargslist, syms.arglist} and n.children and n.children[-1].type == token.COMMA ): if n.type == syms.typedargslist: feature = Feature.TRAILING_COMMA_IN_DEF else: feature = Feature.TRAILING_COMMA_IN_CALL for ch in n.children: if ch.type in STARS: features.add(feature) if ch.type == syms.argument: for argch in ch.children: if argch.type in STARS: features.add(feature) return features def detect_target_versions(node: Node) -> Set[TargetVersion]: 
"""Detect the version to target based on the nodes used.""" features = get_features_used(node) return { version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] } def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: """Generate sets of closing bracket IDs that should be omitted in a RHS. Brackets can be omitted if the entire trailer up to and including a preceding closing bracket fits in one line. Yielded sets are cumulative (contain results of previous yields, too). First set is empty. """ omit: Set[LeafID] = set() yield omit length = 4 * line.depth opening_bracket: Optional[Leaf] = None closing_bracket: Optional[Leaf] = None inner_brackets: Set[LeafID] = set() for index, leaf, leaf_length in enumerate_with_length(line, reversed=True): length += leaf_length if length > line_length: break has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) if leaf.type == STANDALONE_COMMENT or has_inline_comment: break if opening_bracket: if leaf is opening_bracket: opening_bracket = None elif leaf.type in CLOSING_BRACKETS: inner_brackets.add(id(leaf)) elif leaf.type in CLOSING_BRACKETS: if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS: # Empty brackets would fail a split so treat them as "inner" # brackets (e.g. only add them to the `omit` set if another # pair of brackets was good enough. 
def get_future_imports(node: Node) -> Set[str]:
    """Return a set of __future__ imports in the file.

    Only statements before the first non-docstring, non-import statement are
    examined, mirroring Python's own rule that `__future__` imports must come
    first.
    """
    imports: Set[str] = set()

    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
        # Yield every imported name, unwrapping `as` aliases to the original name.
        for child in children:
            if isinstance(child, Leaf):
                if child.type == token.NAME:
                    yield child.value
            elif child.type == syms.import_as_name:
                orig_name = child.children[0]
                assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
                assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
                yield orig_name.value
            elif child.type == syms.import_as_names:
                yield from get_imports_from_children(child.children)
            else:
                raise AssertionError("Invalid syntax parsing imports")

    for child in node.children:
        if child.type != syms.simple_stmt:
            break
        first_child = child.children[0]
        if isinstance(first_child, Leaf):
            # Continue looking if we see a docstring; otherwise stop.
            if (
                len(child.children) == 2
                and first_child.type == token.STRING
                and child.children[1].type == token.NEWLINE
            ):
                continue

            break
        elif first_child.type == syms.import_from:
            module_name = first_child.children[1]
            if not isinstance(module_name, Leaf) or module_name.value != "__future__":
                break
            # children[3:] skips `from`, the module name, and `import`.
            imports |= set(get_imports_from_children(first_child.children[3:]))
        else:
            break
    return imports
def gen_python_files_in_dir(
    path: Path,
    root: Path,
    include: Pattern[str],
    exclude: Pattern[str],
    report: "Report",
    gitignore: PathSpec,
) -> Iterator[Path]:
    """Generate all files under `path` whose paths are not excluded by the
    `exclude` regex, but are included by the `include` regex.

    Symbolic links pointing outside of the `root` directory are ignored.

    `report` is where output about exclusions goes.
    """
    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
    for child in path.iterdir():
        # First ignore files matching .gitignore
        if gitignore.match_file(child.as_posix()):
            # Fix: the messages here and below had pointless `f` prefixes
            # (no placeholders); they are plain string literals.
            report.path_ignored(child, "matches the .gitignore file content")
            continue

        # Then ignore with `exclude` option.
        try:
            normalized_path = "/" + child.resolve().relative_to(root).as_posix()
        except OSError as e:
            report.path_ignored(child, f"cannot be read because {e}")
            continue

        except ValueError:
            if child.is_symlink():
                report.path_ignored(
                    child, f"is a symbolic link that points outside {root}"
                )
                continue

            raise

        if child.is_dir():
            normalized_path += "/"

        exclude_match = exclude.search(normalized_path)
        if exclude_match and exclude_match.group(0):
            report.path_ignored(child, "matches the --exclude regular expression")
            continue

        if child.is_dir():
            yield from gen_python_files_in_dir(
                child, root, include, exclude, report, gitignore
            )

        elif child.is_file():
            include_match = include.search(normalized_path)
            if include_match:
                yield child
@lru_cache()
def find_project_root(srcs: Iterable[str]) -> Path:
    """Return a directory containing .git, .hg, or pyproject.toml.

    That directory can be one of the directories passed in `srcs` or their
    common parent.

    If no directory in the tree contains a marker that would specify it's the
    project root, the root of the file system is returned.
    """
    if not srcs:
        return Path("/").resolve()

    common_base = min(Path(src).resolve() for src in srcs)
    if common_base.is_dir():
        # Append a fake file so `parents` below returns `common_base_dir`, too.
        common_base /= "fake-file"

    for candidate in common_base.parents:
        root_markers = (
            (candidate / ".git").is_dir(),
            (candidate / ".hg").is_dir(),
            (candidate / "pyproject.toml").is_file(),
        )
        if any(root_markers):
            return candidate

    # Fell off the top of the tree: `candidate` is now the filesystem root.
    return candidate
Write out a message.""" if changed is Changed.YES: reformatted = "would reformat" if self.check else "reformatted" if self.verbose or not self.quiet: out(f"{reformatted} {src}") self.change_count += 1 else: if self.verbose: if changed is Changed.NO: msg = f"{src} already well formatted, good job." else: msg = f"{src} wasn't modified on disk since last run." out(msg, bold=False) self.same_count += 1 def failed(self, src: Path, message: str) -> None: """Increment the counter for failed reformatting. Write out a message.""" err(f"error: cannot format {src}: {message}") self.failure_count += 1 def path_ignored(self, path: Path, message: str) -> None: if self.verbose: out(f"{path} ignored: {message}", bold=False) @property def return_code(self) -> int: """Return the exit code that the app should use. This considers the current state of changed files and failures: - if there were any failures, return 123; - if any files were changed and --check is being used, return 1; - otherwise return 0. """ # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with # 126 we have special return codes reserved by the shell. if self.failure_count: return 123 elif self.change_count and self.check: return 1 return 0 def __str__(self) -> str: """Render a color report of the current state. Use `click.unstyle` to remove colors. 
""" if self.check: reformatted = "would be reformatted" unchanged = "would be left unchanged" failed = "would fail to reformat" else: reformatted = "reformatted" unchanged = "left unchanged" failed = "failed to reformat" report = [] if self.change_count: s = "s" if self.change_count > 1 else "" report.append( click.style(f"{self.change_count} file{s} {reformatted}", bold=True) ) if self.same_count: s = "s" if self.same_count > 1 else "" report.append(f"{self.same_count} file{s} {unchanged}") if self.failure_count: s = "s" if self.failure_count > 1 else "" report.append( click.style(f"{self.failure_count} file{s} {failed}", fg="red") ) return ", ".join(report) + "." def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]: filename = "<unknown>" if sys.version_info >= (3, 8): # TODO: support Python 4+ ;) for minor_version in range(sys.version_info[1], 4, -1): try: return ast.parse(src, filename, feature_version=(3, minor_version)) except SyntaxError: continue else: for feature_version in (7, 6): try: return ast3.parse(src, filename, feature_version=feature_version) except SyntaxError: continue return ast27.parse(src) def _fixup_ast_constants( node: Union[ast.AST, ast3.AST, ast27.AST] ) -> Union[ast.AST, ast3.AST, ast27.AST]: """Map ast nodes deprecated in 3.8 to Constant.""" if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)): return ast.Constant(value=node.s) if isinstance(node, (ast.Num, ast3.Num, ast27.Num)): return ast.Constant(value=node.n) if isinstance(node, (ast.NameConstant, ast3.NameConstant)): return ast.Constant(value=node.value) return node def assert_equivalent(src: str, dst: str) -> None: """Raise AssertionError if `src` and `dst` aren't equivalent.""" def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]: """Simple visitor generating strings to compare ASTs by content.""" node = _fixup_ast_constants(node) yield f"{' ' * depth}{node.__class__.__name__}(" for field in sorted(node._fields): # 
noqa: F402 # TypeIgnore has only one field 'lineno' which breaks this comparison type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore) if sys.version_info >= (3, 8): type_ignore_classes += (ast.TypeIgnore,) if isinstance(node, type_ignore_classes): break try: value = getattr(node, field) except AttributeError: continue yield f"{' ' * (depth+1)}{field}=" if isinstance(value, list): for item in value: # Ignore nested tuples within del statements, because we may insert # parentheses and they change the AST. if ( field == "targets" and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete)) and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple)) ): for item in item.elts: yield from _v(item, depth + 2) elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)): yield from _v(item, depth + 2) elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)): yield from _v(value, depth + 2) else: yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}" yield f"{' ' * depth}) # /{node.__class__.__name__}" try: src_ast = parse_ast(src) except Exception as exc: raise AssertionError( f"cannot use --safe with this file; failed to parse source file. " f"AST error message: {exc}" ) try: dst_ast = parse_ast(dst) except Exception as exc: log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) raise AssertionError( f"INTERNAL ERROR: Black produced invalid code: {exc}. " f"Please report a bug on https://github.com/psf/black/issues. " f"This invalid output might be helpful: {log}" ) from None src_ast_str = "\n".join(_v(src_ast)) dst_ast_str = "\n".join(_v(dst_ast)) if src_ast_str != dst_ast_str: log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) raise AssertionError( f"INTERNAL ERROR: Black produced code that is not equivalent to " f"the source. " f"Please report a bug on https://github.com/psf/black/issues. 
" f"This diff might be helpful: {log}" ) from None def assert_stable(src: str, dst: str, mode: FileMode) -> None: """Raise AssertionError if `dst` reformats differently the second time.""" newdst = format_str(dst, mode=mode) if dst != newdst: log = dump_to_file( diff(src, dst, "source", "first pass"), diff(dst, newdst, "first pass", "second pass"), ) raise AssertionError( f"INTERNAL ERROR: Black produced different code on the second pass " f"of the formatter. " f"Please report a bug on https://github.com/psf/black/issues. " f"This diff might be helpful: {log}" ) from None @mypyc_attr(patchable=True) def dump_to_file(*output: str) -> str: """Dump `output` to a temporary file. Return path to the file.""" with tempfile.NamedTemporaryFile( mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" ) as f: for lines in output: f.write(lines) if lines and lines[-1] != "\n": f.write("\n") return f.name @contextmanager def nullcontext() -> Iterator[None]: """Return an empty context manager. To be used like `nullcontext` in Python 3.7. """ yield def diff(a: str, b: str, a_name: str, b_name: str) -> str: """Return a unified diff string between strings `a` and `b`.""" import difflib a_lines = [line + "\n" for line in a.split("\n")] b_lines = [line + "\n" for line in b.split("\n")] return "".join( difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5) ) def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: """asyncio signal handler that cancels all `tasks` and reports to stderr.""" err("Aborted!") for task in tasks: task.cancel() def shutdown(loop: asyncio.AbstractEventLoop) -> None: """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" try: if sys.version_info[:2] >= (3, 7): all_tasks = asyncio.all_tasks else: all_tasks = asyncio.Task.all_tasks # This part is borrowed from asyncio/runners.py in Python 3.7b2. 
to_cancel = [task for task in all_tasks(loop) if not task.done()] if not to_cancel: return for task in to_cancel: task.cancel() loop.run_until_complete( asyncio.gather(*to_cancel, loop=loop, return_exceptions=True) ) finally: # `concurrent.futures.Future` objects cannot be cancelled once they # are already running. There might be some when the `shutdown()` happened. # Silence their logger's spew about the event loop being closed. cf_logger = logging.getLogger("concurrent.futures") cf_logger.setLevel(logging.CRITICAL) loop.close() def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: """Replace `regex` with `replacement` twice on `original`. This is used by string normalization to perform replaces on overlapping matches. """ return regex.sub(replacement, regex.sub(replacement, original)) def re_compile_maybe_verbose(regex: str) -> Pattern[str]: """Compile a regular expression string in `regex`. If it contains newlines, use verbose mode. """ if "\n" in regex: regex = "(?x)" + regex compiled: Pattern[str] = re.compile(regex) return compiled def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: """Like `reversed(enumerate(sequence))` if that were possible.""" index = len(sequence) - 1 for element in reversed(sequence): yield (index, element) index -= 1 def enumerate_with_length( line: Line, reversed: bool = False ) -> Iterator[Tuple[Index, Leaf, int]]: """Return an enumeration of leaves with their length. Stops prematurely on multiline strings and standalone comments. """ op = cast( Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], enumerate_reversed if reversed else enumerate, ) for index, leaf in op(line.leaves): length = len(leaf.prefix) + len(leaf.value) if "\n" in leaf.value: return # Multiline strings, we can't continue. 
for comment in line.comments_after(leaf): length += len(comment.value) yield index, leaf, length def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: """Return True if `line` is no longer than `line_length`. Uses the provided `line_str` rendering, if any, otherwise computes a new one. """ if not line_str: line_str = str(line).strip("\n") return ( len(line_str) <= line_length and "\n" not in line_str # multiline strings and not line.contains_standalone_comments() ) def can_be_split(line: Line) -> bool: """Return False if the line cannot be split *for sure*. This is not an exhaustive search but a cheap heuristic that we can use to avoid some unfortunate formattings (mostly around wrapping unsplittable code in unnecessary parentheses). """ leaves = line.leaves if len(leaves) < 2: return False if leaves[0].type == token.STRING and leaves[1].type == token.DOT: call_count = 0 dot_count = 0 next = leaves[-1] for leaf in leaves[-2::-1]: if leaf.type in OPENING_BRACKETS: if next.type not in CLOSING_BRACKETS: return False call_count += 1 elif leaf.type == token.DOT: dot_count += 1 elif leaf.type == token.NAME: if not (next.type == token.DOT or next.type in OPENING_BRACKETS): return False elif leaf.type not in CLOSING_BRACKETS: return False if dot_count > 1 and call_count > 1: return False return True def can_omit_invisible_parens(line: Line, line_length: int) -> bool: """Does `line` have a shape safe to reformat without optional parens around it? Returns True for only a subset of potentially nice looking formattings but the point is to not return false positives that end up producing lines that are too long. """ bt = line.bracket_tracker if not bt.delimiters: # Without delimiters the optional parentheses are useless. return True max_priority = bt.max_delimiter_priority() if bt.delimiter_count_with_priority(max_priority) > 1: # With more than one delimiter of a kind the optional parentheses read better. 
return False if max_priority == DOT_PRIORITY: # A single stranded method call doesn't require optional parentheses. return True assert len(line.leaves) >= 2, "Stranded delimiter" first = line.leaves[0] second = line.leaves[1] penultimate = line.leaves[-2] last = line.leaves[-1] # With a single delimiter, omit if the expression starts or ends with # a bracket. if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: remainder = False length = 4 * line.depth for _index, leaf, leaf_length in enumerate_with_length(line): if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: remainder = True if remainder: length += leaf_length if length > line_length: break if leaf.type in OPENING_BRACKETS: # There are brackets we can further split on. remainder = False else: # checked the entire string and line length wasn't exceeded if len(line.leaves) == _index + 1: return True # Note: we are not returning False here because a line might have *both* # a leading opening bracket and a trailing closing bracket. If the # opening bracket doesn't match our rule, maybe the closing will. if ( last.type == token.RPAR or last.type == token.RBRACE or ( # don't use indexing for omitting optional parentheses; # it looks weird last.type == token.RSQB and last.parent and last.parent.type != syms.trailer ) ): if penultimate.type in OPENING_BRACKETS: # Empty brackets don't help. return False if is_multiline_string(first): # Additional wrapping of a multiline string in this situation is # unnecessary. return True length = 4 * line.depth seen_other_brackets = False for _index, leaf, leaf_length in enumerate_with_length(line): length += leaf_length if leaf is last.opening_bracket: if seen_other_brackets or length <= line_length: return True elif leaf.type in OPENING_BRACKETS: # There are brackets we can further split on. 
seen_other_brackets = True return False def get_cache_file(mode: FileMode) -> Path: return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" def read_cache(mode: FileMode) -> Cache: """Read the cache if it exists and is well formed. If it is not well formed, the call to write_cache later should resolve the issue. """ cache_file = get_cache_file(mode) if not cache_file.exists(): return {} with cache_file.open("rb") as fobj: try: cache: Cache = pickle.load(fobj) except (pickle.UnpicklingError, ValueError): return {} return cache def get_cache_info(path: Path) -> CacheInfo: """Return the information used to check if a file is already formatted or not.""" stat = path.stat() return stat.st_mtime, stat.st_size def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: """Split an iterable of paths in `sources` into two sets. The first contains paths of files that modified on disk or are not in the cache. The other contains paths to non-modified files. """ todo, done = set(), set() for src in sources: src = src.resolve() if cache.get(src) != get_cache_info(src): todo.add(src) else: done.add(src) return todo, done def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None: """Update the cache file.""" cache_file = get_cache_file(mode) try: CACHE_DIR.mkdir(parents=True, exist_ok=True) new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}} with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: pickle.dump(new_cache, f, protocol=4) os.replace(f.name, cache_file) except OSError: pass def patch_click() -> None: """Make Click not crash. On certain misconfigured environments, Python 3 selects the ASCII encoding as the default which restricts paths that it can access during the lifetime of the application. Click refuses to work in this scenario by raising a RuntimeError. 
In case of Black the likelihood that non-ASCII characters are going to be used in file paths is minimal since it's Python source code. Moreover, this crash was spurious on Python 3.7 thanks to PEP 538 and PEP 540. """ try: from click import core from click import _unicodefun # type: ignore except ModuleNotFoundError: return for module in (core, _unicodefun): if hasattr(module, "_verify_python3_env"): module._verify_python3_env = lambda: None def patched_main() -> None: freeze_support() patch_click() main() if __name__ == "__main__": patched_main()
33.952495
122
0.608639
import ast import asyncio from concurrent.futures import Executor, ProcessPoolExecutor from contextlib import contextmanager from datetime import datetime from enum import Enum from functools import lru_cache, partial, wraps import io import itertools import logging from multiprocessing import Manager, freeze_support import os from pathlib import Path import pickle import regex as re import signal import sys import tempfile import tokenize import traceback from typing import ( Any, Callable, Collection, Dict, Generator, Generic, Iterable, Iterator, List, Optional, Pattern, Sequence, Set, Tuple, TypeVar, Union, cast, ) from typing_extensions import Final from mypy_extensions import mypyc_attr from appdirs import user_cache_dir from dataclasses import dataclass, field, replace import click import toml from typed_ast import ast3, ast27 from pathspec import PathSpec from blib2to3.pytree import Node, Leaf, type_repr from blib2to3 import pygram, pytree from blib2to3.pgen2 import driver, token from blib2to3.pgen2.grammar import Grammar from blib2to3.pgen2.parse import ParseError from _black_version import version as __version__ DEFAULT_LINE_LENGTH = 88 DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/" DEFAULT_INCLUDES = r"\.pyi?$" CACHE_DIR = Path(user_cache_dir("black", version=__version__)) FileContent = str Encoding = str NewLine = str Depth = int NodeType = int LeafID = int Priority = int Index = int LN = Union[Leaf, Node] SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]] Timestamp = float FileSize = int CacheInfo = Tuple[Timestamp, FileSize] Cache = Dict[Path, CacheInfo] out = partial(click.secho, bold=True, err=True) err = partial(click.secho, fg="red", err=True) pygram.initialize(CACHE_DIR) syms = pygram.python_symbols class NothingChanged(UserWarning): class CannotSplit(Exception): class InvalidInput(ValueError): class WriteBack(Enum): NO = 0 YES = 1 DIFF = 2 CHECK = 3 @classmethod 
def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack": if check and not diff: return cls.CHECK return cls.DIFF if diff else cls.YES class Changed(Enum): NO = 0 CACHED = 1 YES = 2 class TargetVersion(Enum): PY27 = 2 PY33 = 3 PY34 = 4 PY35 = 5 PY36 = 6 PY37 = 7 PY38 = 8 def is_python2(self) -> bool: return self is TargetVersion.PY27 PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38} class Feature(Enum): UNICODE_LITERALS = 1 F_STRINGS = 2 NUMERIC_UNDERSCORES = 3 TRAILING_COMMA_IN_CALL = 4 TRAILING_COMMA_IN_DEF = 5 ASYNC_IDENTIFIERS = 6 ASYNC_KEYWORDS = 7 ASSIGNMENT_EXPRESSIONS = 8 POS_ONLY_ARGUMENTS = 9 VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS}, TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, TargetVersion.PY35: { Feature.UNICODE_LITERALS, Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS, }, TargetVersion.PY36: { Feature.UNICODE_LITERALS, Feature.F_STRINGS, Feature.NUMERIC_UNDERSCORES, Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF, Feature.ASYNC_IDENTIFIERS, }, TargetVersion.PY37: { Feature.UNICODE_LITERALS, Feature.F_STRINGS, Feature.NUMERIC_UNDERSCORES, Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF, Feature.ASYNC_KEYWORDS, }, TargetVersion.PY38: { Feature.UNICODE_LITERALS, Feature.F_STRINGS, Feature.NUMERIC_UNDERSCORES, Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF, Feature.ASYNC_KEYWORDS, Feature.ASSIGNMENT_EXPRESSIONS, Feature.POS_ONLY_ARGUMENTS, }, } @dataclass class FileMode: target_versions: Set[TargetVersion] = field(default_factory=set) line_length: int = DEFAULT_LINE_LENGTH string_normalization: bool = True is_pyi: bool = False def get_cache_key(self) -> str: if self.target_versions: version_str = ",".join( str(version.value) for version in sorted(self.target_versions, key=lambda v: v.value) ) 
else: version_str = "-" parts = [ version_str, str(self.line_length), str(int(self.string_normalization)), str(int(self.is_pyi)), ] return ".".join(parts) def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) def read_pyproject_toml( ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None] ) -> Optional[str]: assert not isinstance(value, (int, bool)), "Invalid parameter type passed" if not value: root = find_project_root(ctx.params.get("src", ())) path = root / "pyproject.toml" if path.is_file(): value = str(path) else: return None try: pyproject_toml = toml.load(value) config = pyproject_toml.get("tool", {}).get("black", {}) except (toml.TomlDecodeError, OSError) as e: raise click.FileError( filename=value, hint=f"Error reading configuration file: {e}" ) if not config: return None if ctx.default_map is None: ctx.default_map = {} ctx.default_map.update( ("--", "").replace("-", "_"): v for k, v in config.items()} ) return value def target_version_option_callback( c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...] ) -> List[TargetVersion]: return [TargetVersion[val.upper()] for val in v] @click.command(context_settings=dict(help_option_names=["-h", "--help"])) @click.option("-c", "--code", type=str, help="Format the code passed in as a string.") @click.option( "-l", "--line-length", type=int, default=DEFAULT_LINE_LENGTH, help="How many characters per line to allow.", show_default=True, ) @click.option( "-t", "--target-version", type=click.Choice([v.name.lower() for v in TargetVersion]), callback=target_version_option_callback, multiple=True, help=( "Python versions that should be supported by Black's output. [default: " "per-file auto-detection]" ), ) @click.option( "--py36", is_flag=True, help=( "Allow using Python 3.6-only syntax on all input files. 
This will put " "trailing commas in function signatures and calls also after *args and " "**kwargs. Deprecated; use --target-version instead. " "[default: per-file auto-detection]" ), ) @click.option( "--pyi", is_flag=True, help=( "Format all input files like typing stubs regardless of file extension " "(useful when piping source on standard input)." ), ) @click.option( "-S", "--skip-string-normalization", is_flag=True, help="Don't normalize string quotes or prefixes.", ) @click.option( "--check", is_flag=True, help=( "Don't write the files back, just return the status. Return code 0 " "means nothing would change. Return code 1 means some files would be " "reformatted. Return code 123 means there was an internal error." ), ) @click.option( "--diff", is_flag=True, help="Don't write the files back, just output a diff for each file on stdout.", ) @click.option( "--fast/--safe", is_flag=True, help="If --fast given, skip temporary sanity checks. [default: --safe]", ) @click.option( "--include", type=str, default=DEFAULT_INCLUDES, help=( "A regular expression that matches files and directories that should be " "included on recursive searches. An empty value means all files are " "included regardless of the name. Use forward slashes for directories on " "all platforms (Windows, too). Exclusions are calculated first, inclusions " "later." ), show_default=True, ) @click.option( "--exclude", type=str, default=DEFAULT_EXCLUDES, help=( "A regular expression that matches files and directories that should be " "excluded on recursive searches. An empty value means no paths are excluded. " "Use forward slashes for directories on all platforms (Windows, too). " "Exclusions are calculated first, inclusions later." ), show_default=True, ) @click.option( "-q", "--quiet", is_flag=True, help=( "Don't emit non-error messages to stderr. Errors are still emitted; " "silence those with 2>/dev/null." 
), ) @click.option( "-v", "--verbose", is_flag=True, help=( "Also emit messages to stderr about files that were not changed or were " "ignored due to --exclude=." ), ) @click.version_option(version=__version__) @click.argument( "src", nargs=-1, type=click.Path( exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True ), is_eager=True, ) @click.option( "--config", type=click.Path( exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False ), is_eager=True, callback=read_pyproject_toml, help="Read configuration from PATH.", ) @click.pass_context def main( ctx: click.Context, code: Optional[str], line_length: int, target_version: List[TargetVersion], check: bool, diff: bool, fast: bool, pyi: bool, py36: bool, skip_string_normalization: bool, quiet: bool, verbose: bool, include: str, exclude: str, src: Tuple[str, ...], config: Optional[str], ) -> None: write_back = WriteBack.from_configuration(check=check, diff=diff) if target_version: if py36: err(f"Cannot use both --target-version and --py36") ctx.exit(2) else: versions = set(target_version) elif py36: err( "--py36 is deprecated and will be removed in a future version. " "Use --target-version py36 instead." ) versions = PY36_VERSIONS else: # We'll autodetect later. 
versions = set() mode = FileMode( target_versions=versions, line_length=line_length, is_pyi=pyi, string_normalization=not skip_string_normalization, ) if config and verbose: out(f"Using configuration from {config}.", bold=False, fg="blue") if code is not None: print(format_str(code, mode=mode)) ctx.exit(0) try: include_regex = re_compile_maybe_verbose(include) except re.error: err(f"Invalid regular expression for include given: {include!r}") ctx.exit(2) try: exclude_regex = re_compile_maybe_verbose(exclude) except re.error: err(f"Invalid regular expression for exclude given: {exclude!r}") ctx.exit(2) report = Report(check=check, quiet=quiet, verbose=verbose) root = find_project_root(src) sources: Set[Path] = set() path_empty(src, quiet, verbose, ctx) for s in src: p = Path(s) if p.is_dir(): sources.update( gen_python_files_in_dir( p, root, include_regex, exclude_regex, report, get_gitignore(root) ) ) elif p.is_file() or s == "-": sources.add(p) else: err(f"invalid path: {s}") if len(sources) == 0: if verbose or not quiet: out("No Python files are present to be formatted. Nothing to do 😴") ctx.exit(0) if len(sources) == 1: reformat_one( src=sources.pop(), fast=fast, write_back=write_back, mode=mode, report=report, ) else: reformat_many( sources=sources, fast=fast, write_back=write_back, mode=mode, report=report ) if verbose or not quiet: out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨") click.secho(str(report), err=True) ctx.exit(report.return_code) def path_empty( src: Tuple[str, ...], quiet: bool, verbose: bool, ctx: click.Context ) -> None: if not src: if verbose or not quiet: out("No Path provided. 
Nothing to do 😴") ctx.exit(0) def reformat_one( src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report" ) -> None: try: changed = Changed.NO if not src.is_file() and str(src) == "-": if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode): changed = Changed.YES else: cache: Cache = {} if write_back != WriteBack.DIFF: cache = read_cache(mode) res_src = src.resolve() if res_src in cache and cache[res_src] == get_cache_info(res_src): changed = Changed.CACHED if changed is not Changed.CACHED and format_file_in_place( src, fast=fast, write_back=write_back, mode=mode ): changed = Changed.YES if (write_back is WriteBack.YES and changed is not Changed.CACHED) or ( write_back is WriteBack.CHECK and changed is Changed.NO ): write_cache(cache, [src], mode) report.done(src, changed) except Exception as exc: report.failed(src, str(exc)) def reformat_many( sources: Set[Path], fast: bool, write_back: WriteBack, mode: FileMode, report: "Report", ) -> None: loop = asyncio.get_event_loop() worker_count = os.cpu_count() if sys.platform == "win32": # Work around https://bugs.python.org/issue26903 worker_count = min(worker_count, 61) executor = ProcessPoolExecutor(max_workers=worker_count) try: loop.run_until_complete( schedule_formatting( sources=sources, fast=fast, write_back=write_back, mode=mode, report=report, loop=loop, executor=executor, ) ) finally: shutdown(loop) executor.shutdown() async def schedule_formatting( sources: Set[Path], fast: bool, write_back: WriteBack, mode: FileMode, report: "Report", loop: asyncio.AbstractEventLoop, executor: Executor, ) -> None: cache: Cache = {} if write_back != WriteBack.DIFF: cache = read_cache(mode) sources, cached = filter_cached(cache, sources) for src in sorted(cached): report.done(src, Changed.CACHED) if not sources: return cancelled = [] sources_to_cache = [] lock = None if write_back == WriteBack.DIFF: # For diff output, we need locks to ensure we don't interleave output manager = Manager() lock 
= manager.Lock() tasks = { asyncio.ensure_future( loop.run_in_executor( executor, format_file_in_place, src, fast, mode, write_back, lock ) ): src for src in sorted(sources) } pending: Iterable["asyncio.Future[bool]"] = tasks.keys() try: loop.add_signal_handler(signal.SIGINT, cancel, pending) loop.add_signal_handler(signal.SIGTERM, cancel, pending) except NotImplementedError: pass while pending: done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) for task in done: src = tasks.pop(task) if task.cancelled(): cancelled.append(task) elif task.exception(): report.failed(src, str(task.exception())) else: changed = Changed.YES if task.result() else Changed.NO if write_back is WriteBack.YES or ( write_back is WriteBack.CHECK and changed is Changed.NO ): sources_to_cache.append(src) report.done(src, changed) if cancelled: await asyncio.gather(*cancelled, loop=loop, return_exceptions=True) if sources_to_cache: write_cache(cache, sources_to_cache, mode) def format_file_in_place( src: Path, fast: bool, mode: FileMode, write_back: WriteBack = WriteBack.NO, lock: Any = None, ) -> bool: if src.suffix == ".pyi": mode = replace(mode, is_pyi=True) then = datetime.utcfromtimestamp(src.stat().st_mtime) with open(src, "rb") as buf: src_contents, encoding, newline = decode_bytes(buf.read()) try: dst_contents = format_file_contents(src_contents, fast=fast, mode=mode) except NothingChanged: return False if write_back == WriteBack.YES: with open(src, "w", encoding=encoding, newline=newline) as f: f.write(dst_contents) elif write_back == WriteBack.DIFF: now = datetime.utcnow() src_name = f"{src}\t{then} +0000" dst_name = f"{src}\t{now} +0000" diff_contents = diff(src_contents, dst_contents, src_name, dst_name) with lock or nullcontext(): f = io.TextIOWrapper( sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True, ) f.write(diff_contents) f.detach() return True def format_stdin_to_stdout( fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: 
FileMode ) -> bool: then = datetime.utcnow() src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) dst = src try: dst = format_file_contents(src, fast=fast, mode=mode) return True except NothingChanged: return False finally: f = io.TextIOWrapper( sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True ) if write_back == WriteBack.YES: f.write(dst) elif write_back == WriteBack.DIFF: now = datetime.utcnow() src_name = f"STDIN\t{then} +0000" dst_name = f"STDOUT\t{now} +0000" f.write(diff(src, dst, src_name, dst_name)) f.detach() def format_file_contents( src_contents: str, *, fast: bool, mode: FileMode ) -> FileContent: if src_contents.strip() == "": raise NothingChanged dst_contents = format_str(src_contents, mode=mode) if src_contents == dst_contents: raise NothingChanged if not fast: assert_equivalent(src_contents, dst_contents) assert_stable(src_contents, dst_contents, mode=mode) return dst_contents def format_str(src_contents: str, *, mode: FileMode) -> FileContent: src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) dst_contents = [] future_imports = get_future_imports(src_node) if mode.target_versions: versions = mode.target_versions else: versions = detect_target_versions(src_node) normalize_fmt_off(src_node) lines = LineGenerator( remove_u_prefix="unicode_literals" in future_imports or supports_feature(versions, Feature.UNICODE_LITERALS), is_pyi=mode.is_pyi, normalize_strings=mode.string_normalization, ) elt = EmptyLineTracker(is_pyi=mode.is_pyi) empty_line = Line() after = 0 split_line_features = { feature for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF} if supports_feature(versions, feature) } for current_line in lines.visit(src_node): for _ in range(after): dst_contents.append(str(empty_line)) before, after = elt.maybe_empty_lines(current_line) for _ in range(before): dst_contents.append(str(empty_line)) for line in split_line( current_line, line_length=mode.line_length, 
features=split_line_features ): dst_contents.append(str(line)) return "".join(dst_contents) def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]: srcbuf = io.BytesIO(src) encoding, lines = tokenize.detect_encoding(srcbuf.readline) if not lines: return "", encoding, "\n" newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n" srcbuf.seek(0) with io.TextIOWrapper(srcbuf, encoding) as tiow: return tiow.read(), encoding, newline def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: if not target_versions: return [ pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, pygram.python_grammar_no_print_statement_no_exec_statement, pygram.python_grammar_no_print_statement, pygram.python_grammar, ] if all(version.is_python2() for version in target_versions): return [ pygram.python_grammar_no_print_statement, pygram.python_grammar, ] grammars = [] if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS): grammars.append( pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords ) if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) return grammars def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: if src_txt[-1:] != "\n": src_txt += "\n" for grammar in get_grammars(set(target_versions)): drv = driver.Driver(grammar, pytree.convert) try: result = drv.parse_string(src_txt, True) break except ParseError as pe: lineno, column = pe.context[1] lines = src_txt.splitlines() try: faulty_line = lines[lineno - 1] except IndexError: faulty_line = "<line number missing in source>" exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}") else: raise exc from None if isinstance(result, Leaf): result = Node(syms.file_input, [result]) return result def lib2to3_unparse(node: Node) -> str: code = str(node) return code T = TypeVar("T") class Visitor(Generic[T]): def visit(self, 
node: LN) -> Iterator[T]: if node.type < 256: name = token.tok_name[node.type] else: name = str(type_repr(node.type)) visitf = getattr(self, f"visit_{name}", None) if visitf: yield from visitf(node) else: yield from self.visit_default(node) def visit_default(self, node: LN) -> Iterator[T]: if isinstance(node, Node): for child in node.children: yield from self.visit(child) @dataclass class DebugVisitor(Visitor[T]): tree_depth: int = 0 def visit_default(self, node: LN) -> Iterator[T]: indent = " " * (2 * self.tree_depth) if isinstance(node, Node): _type = type_repr(node.type) out(f"{indent}{_type}", fg="yellow") self.tree_depth += 1 for child in node.children: yield from self.visit(child) self.tree_depth -= 1 out(f"{indent}/{_type}", fg="yellow", bold=False) else: _type = token.tok_name.get(node.type, str(node.type)) out(f"{indent}{_type}", fg="blue", nl=False) if node.prefix: # that delegates to the first child anyway. out(f" {node.prefix!r}", fg="green", bold=False, nl=False) out(f" {node.value!r}", fg="blue", bold=False) @classmethod def show(cls, code: Union[str, Leaf, Node]) -> None: v: DebugVisitor[None] = DebugVisitor() if isinstance(code, str): code = lib2to3_parse(code) list(v.visit(code)) WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} STATEMENT: Final = { syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt, syms.except_clause, syms.with_stmt, syms.funcdef, syms.classdef, } STANDALONE_COMMENT: Final = 153 token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" LOGIC_OPERATORS: Final = {"and", "or"} COMPARATORS: Final = { token.LESS, token.GREATER, token.EQEQUAL, token.NOTEQUAL, token.LESSEQUAL, token.GREATEREQUAL, } MATH_OPERATORS: Final = { token.VBAR, token.CIRCUMFLEX, token.AMPER, token.LEFTSHIFT, token.RIGHTSHIFT, token.PLUS, token.MINUS, token.STAR, token.SLASH, token.DOUBLESLASH, token.PERCENT, token.AT, token.TILDE, token.DOUBLESTAR, } STARS: Final = {token.STAR, token.DOUBLESTAR} VARARGS_SPECIALS: Final = STARS | 
{token.SLASH} VARARGS_PARENTS: Final = { syms.arglist, syms.argument, # double star in arglist syms.trailer, # single argument to call syms.typedargslist, syms.varargslist, # lambdas } UNPACKING_PARENTS: Final = { syms.atom, # single element of a list or set literal syms.dictsetmaker, syms.listmaker, syms.testlist_gexp, syms.testlist_star_expr, } TEST_DESCENDANTS: Final = { syms.test, syms.lambdef, syms.or_test, syms.and_test, syms.not_test, syms.comparison, syms.star_expr, syms.expr, syms.xor_expr, syms.and_expr, syms.shift_expr, syms.arith_expr, syms.trailer, syms.term, syms.power, } ASSIGNMENTS: Final = { "=", "+=", "-=", "*=", "@=", "/=", "%=", "&=", "|=", "^=", "<<=", ">>=", "**=", "//=", } COMPREHENSION_PRIORITY: Final = 20 COMMA_PRIORITY: Final = 18 TERNARY_PRIORITY: Final = 16 LOGIC_PRIORITY: Final = 14 STRING_PRIORITY: Final = 12 COMPARATOR_PRIORITY: Final = 10 MATH_PRIORITIES: Final = { token.VBAR: 9, token.CIRCUMFLEX: 8, token.AMPER: 7, token.LEFTSHIFT: 6, token.RIGHTSHIFT: 6, token.PLUS: 5, token.MINUS: 5, token.STAR: 4, token.SLASH: 4, token.DOUBLESLASH: 4, token.PERCENT: 4, token.AT: 4, token.TILDE: 3, token.DOUBLESTAR: 2, } DOT_PRIORITY: Final = 1 @dataclass class BracketTracker: depth: int = 0 bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) delimiters: Dict[LeafID, Priority] = field(default_factory=dict) previous: Optional[Leaf] = None _for_loop_depths: List[int] = field(default_factory=list) _lambda_argument_depths: List[int] = field(default_factory=list) def mark(self, leaf: Leaf) -> None: if leaf.type == token.COMMENT: return self.maybe_decrement_after_for_loop_variable(leaf) self.maybe_decrement_after_lambda_arguments(leaf) if leaf.type in CLOSING_BRACKETS: self.depth -= 1 opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) leaf.opening_bracket = opening_bracket leaf.bracket_depth = self.depth if self.depth == 0: delim = is_split_before_delimiter(leaf, self.previous) if delim and self.previous is not 
None: self.delimiters[id(self.previous)] = delim else: delim = is_split_after_delimiter(leaf, self.previous) if delim: self.delimiters[id(leaf)] = delim if leaf.type in OPENING_BRACKETS: self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf self.depth += 1 self.previous = leaf self.maybe_increment_lambda_arguments(leaf) self.maybe_increment_for_loop_variable(leaf) def any_open_brackets(self) -> bool: return bool(self.bracket_match) def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: return max(v for k, v in self.delimiters.items() if k not in exclude) def delimiter_count_with_priority(self, priority: Priority = 0) -> int: if not self.delimiters: return 0 priority = priority or self.max_delimiter_priority() return sum(1 for p in self.delimiters.values() if p == priority) def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: if leaf.type == token.NAME and leaf.value == "for": self.depth += 1 self._for_loop_depths.append(self.depth) return True return False def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: if ( self._for_loop_depths and self._for_loop_depths[-1] == self.depth and leaf.type == token.NAME and leaf.value == "in" ): self.depth -= 1 self._for_loop_depths.pop() return True return False def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: if leaf.type == token.NAME and leaf.value == "lambda": self.depth += 1 self._lambda_argument_depths.append(self.depth) return True return False def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: if ( self._lambda_argument_depths and self._lambda_argument_depths[-1] == self.depth and leaf.type == token.COLON ): self.depth -= 1 self._lambda_argument_depths.pop() return True return False def get_open_lsqb(self) -> Optional[Leaf]: return self.bracket_match.get((self.depth - 1, token.RSQB)) @dataclass class Line: depth: int = 0 leaves: List[Leaf] = field(default_factory=list) # keys ordered like `leaves` comments: Dict[LeafID, List[Leaf]] 
= field(default_factory=dict) bracket_tracker: BracketTracker = field(default_factory=BracketTracker) inside_brackets: bool = False should_explode: bool = False def append(self, leaf: Leaf, preformatted: bool = False) -> None: has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) if not has_value: return if token.COLON == leaf.type and self.is_class_paren_empty: del self.leaves[-2:] if self.leaves and not preformatted: # Note: at this point leaf.prefix should be empty except for # imports, for which we only preserve newlines. leaf.prefix += whitespace( leaf, complex_subscript=self.is_complex_subscript(leaf) ) if self.inside_brackets or not preformatted: self.bracket_tracker.mark(leaf) self.maybe_remove_trailing_comma(leaf) if not self.append_comment(leaf): self.leaves.append(leaf) def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: if self.bracket_tracker.depth == 0: if self.is_comment: raise ValueError("cannot append to standalone comments") if self.leaves and leaf.type == STANDALONE_COMMENT: raise ValueError( "cannot append standalone comments to a populated line" ) self.append(leaf, preformatted=preformatted) @property def is_comment(self) -> bool: return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT @property def is_decorator(self) -> bool: return bool(self) and self.leaves[0].type == token.AT @property def is_import(self) -> bool: return bool(self) and is_import(self.leaves[0]) @property def is_class(self) -> bool: return ( bool(self) and self.leaves[0].type == token.NAME and self.leaves[0].value == "class" ) @property def is_stub_class(self) -> bool: return self.is_class and self.leaves[-3:] == [ Leaf(token.DOT, ".") for _ in range(3) ] @property def is_collection_with_optional_trailing_comma(self) -> bool: if not self.leaves or len(self.leaves) < 4: return False # Look for and address a trailing colon. 
if self.leaves[-1].type == token.COLON: closer = self.leaves[-2] close_index = -2 else: closer = self.leaves[-1] close_index = -1 if closer.type not in CLOSING_BRACKETS or self.inside_brackets: return False if closer.type == token.RPAR: # Tuples require an extra check, because if there's only opener = closer.opening_bracket for _open_index, leaf in enumerate(self.leaves): if leaf is opener: break else: return False commas = 0 comma_depth = self.leaves[close_index - 1].bracket_depth for leaf in self.leaves[_open_index + 1 : close_index]: if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA: commas += 1 if commas > 1: # We haven't looked yet for the trailing comma because return self.leaves[close_index - 1].type == token.COMMA elif commas == 1: return False if self.leaves[close_index - 1].type in CLOSING_BRACKETS: close_index -= 1 closer = self.leaves[close_index] if closer.type == token.RPAR: return False if self.leaves[close_index - 1].type != token.COMMA: return False return True @property def is_def(self) -> bool: try: first_leaf = self.leaves[0] except IndexError: return False try: second_leaf: Optional[Leaf] = self.leaves[1] except IndexError: second_leaf = None return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( first_leaf.type == token.ASYNC and second_leaf is not None and second_leaf.type == token.NAME and second_leaf.value == "def" ) @property def is_class_paren_empty(self) -> bool: return ( bool(self) and len(self.leaves) == 4 and self.is_class and self.leaves[2].type == token.LPAR and self.leaves[2].value == "(" and self.leaves[3].type == token.RPAR and self.leaves[3].value == ")" ) @property def is_triple_quoted_string(self) -> bool: return ( bool(self) and self.leaves[0].type == token.STRING and self.leaves[0].value.startswith(('"""', "'''")) ) def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: for leaf in self.leaves: if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= 
depth_limit: return True return False def contains_uncollapsable_type_comments(self) -> bool: ignored_ids = set() try: last_leaf = self.leaves[-1] ignored_ids.add(id(last_leaf)) if last_leaf.type == token.COMMA or ( last_leaf.type == token.RPAR and not last_leaf.value ): # When trailing commas or optional parens are inserted by Black for # consistency, comments after the previous last element are not moved # (they don't have to, rendering will still be correct). So we ignore # trailing commas and invisible. last_leaf = self.leaves[-2] ignored_ids.add(id(last_leaf)) except IndexError: return False # A type comment is uncollapsable if it is attached to a leaf # that isn't at the end of the line (since that could cause it # to get associated to a different argument) or if there are # comments before it (since that could cause it to get hidden # behind a comment. comment_seen = False for leaf_id, comments in self.comments.items(): for comment in comments: if is_type_comment(comment): if comment_seen or ( not is_type_comment(comment, " ignore") and leaf_id not in ignored_ids ): return True comment_seen = True return False def contains_unsplittable_type_ignore(self) -> bool: if not self.leaves: return False # If a 'type: ignore' is attached to the end of a line, we # can't split the line, because we can't know which of the # subexpressions the ignore was meant to apply to. # # We only want this to apply to actual physical lines from the # original source, though: we don't want the presence of a # 'type: ignore' at the end of a multiline expression to # justify pushing it all onto one line. Thus we # (unfortunately) need to check the actual source lines and # only report an unsplittable 'type: ignore' if this line was # one line in the original code. 
# Grab the first and last line numbers, skipping generated leaves first_line = next((l.lineno for l in self.leaves if l.lineno != 0), 0) last_line = next((l.lineno for l in reversed(self.leaves) if l.lineno != 0), 0) if first_line == last_line: # We look at the last two leaves since a comma or an # invisible paren could have been added at the end of the # line. for node in self.leaves[-2:]: for comment in self.comments.get(id(node), []): if is_type_comment(comment, " ignore"): return True return False def contains_multiline_strings(self) -> bool: for leaf in self.leaves: if is_multiline_string(leaf): return True return False def maybe_remove_trailing_comma(self, closing: Leaf) -> bool: if not (self.leaves and self.leaves[-1].type == token.COMMA): return False # We remove trailing commas only in the case of importing a # single name from a module. if not ( self.leaves and self.is_import and len(self.leaves) > 4 and self.leaves[-1].type == token.COMMA and closing.type in CLOSING_BRACKETS and self.leaves[-4].type == token.NAME and ( # regular `from foo import bar,` self.leaves[-4].value == "import" # `from foo import (bar as baz,) or ( len(self.leaves) > 6 and self.leaves[-6].value == "import" and self.leaves[-3].value == "as" ) # `from foo import bar as baz,` or ( len(self.leaves) > 5 and self.leaves[-5].value == "import" and self.leaves[-3].value == "as" ) ) and closing.type == token.RPAR ): return False self.remove_trailing_comma() return True def append_comment(self, comment: Leaf) -> bool: if ( comment.type == STANDALONE_COMMENT and self.bracket_tracker.any_open_brackets() ): comment.prefix = "" return False if comment.type != token.COMMENT: return False if not self.leaves: comment.type = STANDALONE_COMMENT comment.prefix = "" return False last_leaf = self.leaves[-1] if ( last_leaf.type == token.RPAR and not last_leaf.value and last_leaf.parent and len(list(last_leaf.parent.leaves())) <= 3 and not is_type_comment(comment) ): # Comments on an optional parens 
wrapping a single leaf should belong to # the wrapped node except if it's a type comment. Pinning the comment like # this avoids unstable formatting caused by comment migration. if len(self.leaves) < 2: comment.type = STANDALONE_COMMENT comment.prefix = "" return False last_leaf = self.leaves[-2] self.comments.setdefault(id(last_leaf), []).append(comment) return True def comments_after(self, leaf: Leaf) -> List[Leaf]: return self.comments.get(id(leaf), []) def remove_trailing_comma(self) -> None: trailing_comma = self.leaves.pop() trailing_comma_comments = self.comments.pop(id(trailing_comma), []) self.comments.setdefault(id(self.leaves[-1]), []).extend( trailing_comma_comments ) def is_complex_subscript(self, leaf: Leaf) -> bool: open_lsqb = self.bracket_tracker.get_open_lsqb() if open_lsqb is None: return False subscript_start = open_lsqb.next_sibling if isinstance(subscript_start, Node): if subscript_start.type == syms.listmaker: return False if subscript_start.type == syms.subscriptlist: subscript_start = child_towards(subscript_start, leaf) return subscript_start is not None and any( n.type in TEST_DESCENDANTS for n in subscript_start.pre_order() ) def __str__(self) -> str: if not self: return "\n" indent = " " * self.depth leaves = iter(self.leaves) first = next(leaves) res = f"{first.prefix}{indent}{first.value}" for leaf in leaves: res += str(leaf) for comment in itertools.chain.from_iterable(self.comments.values()): res += str(comment) return res + "\n" def __bool__(self) -> bool: return bool(self.leaves or self.comments) @dataclass class EmptyLineTracker: is_pyi: bool = False previous_line: Optional[Line] = None previous_after: int = 0 previous_defs: List[int] = field(default_factory=list) def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: before, after = self._maybe_empty_lines(current_line) before = ( # Black should not insert empty lines at the beginning # of the file 0 if self.previous_line is None else before - self.previous_after ) 
        self.previous_after = after
        self.previous_line = current_line
        return before, after

    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
        """Compute raw (before, after) blank-line counts for `current_line`.

        `before` is capped at 1 for nested code and at 2 (1 in .pyi stubs) at
        module level; the newlines already present in the first leaf's prefix
        are consumed here so they are not emitted a second time.
        """
        max_allowed = 1
        if current_line.depth == 0:
            # Top level allows up to two blank lines (only one in stub files).
            max_allowed = 1 if self.is_pyi else 2
        if current_line.leaves:
            # Consume the first leaf's extra newlines.
            first_leaf = current_line.leaves[0]
            before = first_leaf.prefix.count("\n")
            before = min(before, max_allowed)
            first_leaf.prefix = ""
        else:
            before = 0
        depth = current_line.depth
        while self.previous_defs and self.previous_defs[-1] >= depth:
            # We dedented out of one or more function/class bodies; force the
            # conventional number of blank lines after them.
            self.previous_defs.pop()
            if self.is_pyi:
                before = 0 if depth else 1
            else:
                before = 1 if depth else 2
        if current_line.is_decorator or current_line.is_def or current_line.is_class:
            return self._maybe_empty_lines_for_class_or_def(current_line, before)

        if (
            self.previous_line
            and self.previous_line.is_import
            and not current_line.is_import
            and depth == self.previous_line.depth
        ):
            # Separate a block of imports from the code that follows it.
            return (before or 1), 0

        if (
            self.previous_line
            and self.previous_line.is_class
            and current_line.is_triple_quoted_string
        ):
            # Triple-quoted string (docstring) directly after a class line:
            # request one blank line after it.
            return before, 1

        return before, 0

    def _maybe_empty_lines_for_class_or_def(
        self, current_line: Line, before: int
    ) -> Tuple[int, int]:
        """Blank-line rules for lines that open (or decorate) a class/def."""
        if not current_line.is_decorator:
            self.previous_defs.append(current_line.depth)
        if self.previous_line is None:
            # Don't insert empty lines before the first line in the file.
return 0, 0 if self.previous_line.is_decorator: return 0, 0 if self.previous_line.depth < current_line.depth and ( self.previous_line.is_class or self.previous_line.is_def ): return 0, 0 if ( self.previous_line.is_comment and self.previous_line.depth == current_line.depth and before == 0 ): return 0, 0 if self.is_pyi: if self.previous_line.depth > current_line.depth: newlines = 1 elif current_line.is_class or self.previous_line.is_class: if current_line.is_stub_class and self.previous_line.is_stub_class: # No blank line between classes with an empty body newlines = 0 else: newlines = 1 elif current_line.is_def and not self.previous_line.is_def: # Blank line between a block of functions and a block of non-functions newlines = 1 else: newlines = 0 else: newlines = 2 if current_line.depth and newlines: newlines -= 1 return newlines, 0 @dataclass class LineGenerator(Visitor[Line]): is_pyi: bool = False normalize_strings: bool = True current_line: Line = field(default_factory=Line) remove_u_prefix: bool = False def line(self, indent: int = 0) -> Iterator[Line]: if not self.current_line: self.current_line.depth += indent return # Line is empty, don't emit. Creating a new one unnecessary. 
complete_line = self.current_line self.current_line = Line(depth=complete_line.depth + indent) yield complete_line def visit_default(self, node: LN) -> Iterator[Line]: if isinstance(node, Leaf): any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() for comment in generate_comments(node): if any_open_brackets: # any comment within brackets is subject to splitting self.current_line.append(comment) elif comment.type == token.COMMENT: # regular trailing comment self.current_line.append(comment) yield from self.line() else: # regular standalone comment yield from self.line() self.current_line.append(comment) yield from self.line() normalize_prefix(node, inside_brackets=any_open_brackets) if self.normalize_strings and node.type == token.STRING: normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix) normalize_string_quotes(node) if node.type == token.NUMBER: normalize_numeric_literal(node) if node.type not in WHITESPACE: self.current_line.append(node) yield from super().visit_default(node) def visit_INDENT(self, node: Leaf) -> Iterator[Line]: # In blib2to3 INDENT never holds comments. yield from self.line(+1) yield from self.visit_default(node) def visit_DEDENT(self, node: Leaf) -> Iterator[Line]: # The current line might still wait for trailing comments. At DEDENT time # there won't be any (they would be prefixes on the preceding NEWLINE). # Emit the line then. yield from self.line() # While DEDENT has no value, its prefix may contain standalone comments # that belong to the current indentation level. Get 'em. yield from self.visit_default(node) # Finally, emit the dedent. 
yield from self.line(-1) def visit_stmt( self, node: Node, keywords: Set[str], parens: Set[str] ) -> Iterator[Line]: normalize_invisible_parens(node, parens_after=parens) for child in node.children: if child.type == token.NAME and child.value in keywords: # type: ignore yield from self.line() yield from self.visit(child) def visit_suite(self, node: Node) -> Iterator[Line]: if self.is_pyi and is_stub_suite(node): yield from self.visit(node.children[2]) else: yield from self.visit_default(node) def visit_simple_stmt(self, node: Node) -> Iterator[Line]: is_suite_like = node.parent and node.parent.type in STATEMENT if is_suite_like: if self.is_pyi and is_stub_body(node): yield from self.visit_default(node) else: yield from self.line(+1) yield from self.visit_default(node) yield from self.line(-1) else: if not self.is_pyi or not node.parent or not is_stub_suite(node.parent): yield from self.line() yield from self.visit_default(node) def visit_async_stmt(self, node: Node) -> Iterator[Line]: yield from self.line() children = iter(node.children) for child in children: yield from self.visit(child) if child.type == token.ASYNC: break internal_stmt = next(children) for child in internal_stmt.children: yield from self.visit(child) def visit_decorators(self, node: Node) -> Iterator[Line]: for child in node.children: yield from self.line() yield from self.visit(child) def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: yield from self.line() def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: yield from self.visit_default(leaf) yield from self.line() def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: if not self.current_line.bracket_tracker.any_open_brackets(): yield from self.line() yield from self.visit_default(leaf) def visit_factor(self, node: Node) -> Iterator[Line]: _operator, operand = node.children if ( operand.type == syms.power and len(operand.children) == 3 and operand.children[1].type == token.DOUBLESTAR ): lpar = Leaf(token.LPAR, "(") rpar = 
Leaf(token.RPAR, ")") index = operand.remove() or 0 node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) yield from self.visit_default(node) def __post_init__(self) -> None: v = self.visit_stmt Ø: Set[str] = set() self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) self.visit_if_stmt = partial( v, keywords={"if", "else", "elif"}, parens={"if", "elif"} ) self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) self.visit_try_stmt = partial( v, keywords={"try", "except", "else", "finally"}, parens=Ø ) self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø) self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) self.visit_async_funcdef = self.visit_async_stmt self.visit_decorated = self.visit_decorators IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist} BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE} OPENING_BRACKETS = set(BRACKET.keys()) CLOSING_BRACKETS = set(BRACKET.values()) BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 NO = "" SPACE = " " DOUBLESPACE = " " t = leaf.type p = leaf.parent v = leaf.value if t in ALWAYS_NO_SPACE: return NO if t == token.COMMENT: return DOUBLESPACE assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" if t == token.COLON and p.type not in { 
syms.subscript, syms.subscriptlist, syms.sliceop, }: return NO prev = leaf.prev_sibling if not prev: prevp = preceding_leaf(p) if not prevp or prevp.type in OPENING_BRACKETS: return NO if t == token.COLON: if prevp.type == token.COLON: return NO elif prevp.type != token.COMMA and not complex_subscript: return NO return SPACE if prevp.type == token.EQUAL: if prevp.parent: if prevp.parent.type in { syms.arglist, syms.argument, syms.parameters, syms.varargslist, }: return NO elif prevp.parent.type == syms.typedargslist: # A bit hacky: if the equal sign has whitespace, it means we # previously found it's a typed argument. So, we're using # that, too. return prevp.prefix elif prevp.type in VARARGS_SPECIALS: if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): return NO elif prevp.type == token.COLON: if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: return SPACE if complex_subscript else NO elif ( prevp.parent and prevp.parent.type == syms.factor and prevp.type in MATH_OPERATORS ): return NO elif ( prevp.type == token.RIGHTSHIFT and prevp.parent and prevp.parent.type == syms.shift_expr and prevp.prev_sibling and prevp.prev_sibling.type == token.NAME and prevp.prev_sibling.value == "print" # type: ignore ): # Python 2 print chevron return NO elif prev.type in OPENING_BRACKETS: return NO if p.type in {syms.parameters, syms.arglist}: # untyped function signatures or calls if not prev or prev.type != token.COMMA: return NO elif p.type == syms.varargslist: # lambdas if prev and prev.type != token.COMMA: return NO elif p.type == syms.typedargslist: # typed function signatures if not prev: return NO if t == token.EQUAL: if prev.type != syms.tname: return NO elif prev.type == token.EQUAL: # A bit hacky: if the equal sign has whitespace, it means we # previously found it's a typed argument. So, we're using that, too. 
return prev.prefix elif prev.type != token.COMMA: return NO elif p.type == syms.tname: # type names if not prev: prevp = preceding_leaf(p) if not prevp or prevp.type != token.COMMA: return NO elif p.type == syms.trailer: # attributes and calls if t == token.LPAR or t == token.RPAR: return NO if not prev: if t == token.DOT: prevp = preceding_leaf(p) if not prevp or prevp.type != token.NUMBER: return NO elif t == token.LSQB: return NO elif prev.type != token.COMMA: return NO elif p.type == syms.argument: # single argument if t == token.EQUAL: return NO if not prev: prevp = preceding_leaf(p) if not prevp or prevp.type == token.LPAR: return NO elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: return NO elif p.type == syms.decorator: # decorators return NO elif p.type == syms.dotted_name: if prev: return NO prevp = preceding_leaf(p) if not prevp or prevp.type == token.AT or prevp.type == token.DOT: return NO elif p.type == syms.classdef: if t == token.LPAR: return NO if prev and prev.type == token.LPAR: return NO elif p.type in {syms.subscript, syms.sliceop}: # indexing if not prev: assert p.parent is not None, "subscripts are always parented" if p.parent.type == syms.subscriptlist: return SPACE return NO elif not complex_subscript: return NO elif p.type == syms.atom: if prev and t == token.DOT: # dots, but not the first one. 
            return NO

    elif p.type == syms.dictsetmaker:
        # dict unpacking
        if prev and prev.type == token.DOUBLESTAR:
            return NO

    elif p.type in {syms.factor, syms.star_expr}:
        # unary ops
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type in OPENING_BRACKETS:
                return NO

            prevp_parent = prevp.parent
            assert prevp_parent is not None
            if prevp.type == token.COLON and prevp_parent.type in {
                syms.subscript,
                syms.sliceop,
            }:
                return NO

            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
                return NO

        elif t in {token.NAME, token.NUMBER, token.STRING}:
            return NO

    elif p.type == syms.import_from:
        if t == token.DOT:
            if prev and prev.type == token.DOT:
                return NO

        elif t == token.NAME:
            if v == "import":
                return SPACE

            if prev and prev.type == token.DOT:
                return NO

    elif p.type == syms.sliceop:
        return NO

    return SPACE


def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
    """Return the first leaf that precedes `node` in the tree, if any."""
    while node:
        res = node.prev_sibling
        if res:
            if isinstance(res, Leaf):
                return res

            try:
                # Last leaf of the preceding sibling subtree.
                return list(res.leaves())[-1]
            except IndexError:
                return None

        node = node.parent
    return None


def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
    """Return the direct child of `ancestor` whose subtree contains `descendant`."""
    node: Optional[LN] = descendant
    while node and node.parent != ancestor:
        node = node.parent
    return node


def container_of(leaf: Leaf) -> LN:
    """Return `leaf` or the topmost ancestor that shares `leaf`'s prefix.

    Stops at file level, at a parent whose first child carries a different
    prefix, or when the previous sibling is a bracket (the container would
    otherwise extend into a bracketed group).
    """
    same_prefix = leaf.prefix
    container: LN = leaf
    while container:
        parent = container.parent
        if parent is None:
            break

        if parent.children[0].prefix != same_prefix:
            break

        if parent.type == syms.file_input:
            break

        if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
            break

        container = parent
    return container


def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
    """Return the priority of splitting *after* `leaf`.

    Only trailing commas qualify; higher numbers mean higher priority.
    """
    if leaf.type == token.COMMA:
        return COMMA_PRIORITY

    return 0


def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
    """Return the priority of splitting the line *before* `leaf`.

    Higher numbers mean higher priority; 0 means `leaf` is not a delimiter.
    """
    if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
        # * and ** might also be MATH_OPERATORS but in this case they are not.
        # Don't treat them as a delimiter.
return 0 if ( leaf.type == token.DOT and leaf.parent and leaf.parent.type not in {syms.import_from, syms.dotted_name} and (previous is None or previous.type in CLOSING_BRACKETS) ): return DOT_PRIORITY if ( leaf.type in MATH_OPERATORS and leaf.parent and leaf.parent.type not in {syms.factor, syms.star_expr} ): return MATH_PRIORITIES[leaf.type] if leaf.type in COMPARATORS: return COMPARATOR_PRIORITY if ( leaf.type == token.STRING and previous is not None and previous.type == token.STRING ): return STRING_PRIORITY if leaf.type not in {token.NAME, token.ASYNC}: return 0 if ( leaf.value == "for" and leaf.parent and leaf.parent.type in {syms.comp_for, syms.old_comp_for} or leaf.type == token.ASYNC ): if ( not isinstance(leaf.prev_sibling, Leaf) or leaf.prev_sibling.value != "async" ): return COMPREHENSION_PRIORITY if ( leaf.value == "if" and leaf.parent and leaf.parent.type in {syms.comp_if, syms.old_comp_if} ): return COMPREHENSION_PRIORITY if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: return TERNARY_PRIORITY if leaf.value == "is": return COMPARATOR_PRIORITY if ( leaf.value == "in" and leaf.parent and leaf.parent.type in {syms.comp_op, syms.comparison} and not ( previous is not None and previous.type == token.NAME and previous.value == "not" ) ): return COMPARATOR_PRIORITY if ( leaf.value == "not" and leaf.parent and leaf.parent.type == syms.comp_op and not ( previous is not None and previous.type == token.NAME and previous.value == "is" ) ): return COMPARATOR_PRIORITY if leaf.value in LOGIC_OPERATORS and leaf.parent: return LOGIC_PRIORITY return 0 FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"} FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"} def generate_comments(leaf: LN) -> Iterator[Leaf]: for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) @dataclass class ProtoComment: type: int # token.COMMENT or STANDALONE_COMMENT value: str # 
content of the comment newlines: int # how many newlines before the comment consumed: int # how many characters of the original leaf's prefix did we consume @lru_cache(maxsize=4096) def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: result: List[ProtoComment] = [] if not prefix or "#" not in prefix: return result consumed = 0 nlines = 0 ignored_lines = 0 for index, line in enumerate(prefix.split("\n")): consumed += len(line) + 1 # adding the length of the split '\n' line = line.lstrip() if not line: nlines += 1 if not line.startswith("#"): # Escaped newlines outside of a comment are not really newlines at # all. We treat a single-line comment following an escaped newline # as a simple trailing comment. if line.endswith("\\"): ignored_lines += 1 continue if index == ignored_lines and not is_endmarker: comment_type = token.COMMENT # simple trailing comment else: comment_type = STANDALONE_COMMENT comment = make_comment(line) result.append( ProtoComment( type=comment_type, value=comment, newlines=nlines, consumed=consumed ) ) nlines = 0 return result def make_comment(content: str) -> str: content = content.rstrip() if not content: return "#" if content[0] == "#": content = content[1:] if content and content[0] not in " !:#'%": content = " " + content return "#" + content def split_line( line: Line, line_length: int, inner: bool = False, features: Collection[Feature] = (), ) -> Iterator[Line]: if line.is_comment: yield line return line_str = str(line).strip("\n") if ( not line.contains_uncollapsable_type_comments() and not line.should_explode and not line.is_collection_with_optional_trailing_comma and ( is_line_short_enough(line, line_length=line_length, line_str=line_str) or line.contains_unsplittable_type_ignore() ) ): yield line return split_funcs: List[SplitFunc] if line.is_def: split_funcs = [left_hand_split] else: def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: for omit in generate_trailers_to_omit(line, 
line_length): lines = list(right_hand_split(line, line_length, features, omit=omit)) if is_line_short_enough(lines[0], line_length=line_length): yield from lines return # All splits failed, best effort split with no omits. # This mostly happens to multiline strings that are by definition # reported as not fitting a single line. # line_length=1 here was historically a bug that somehow became a feature. # See #762 and #781 for the full story. yield from right_hand_split(line, line_length=1, features=features) if line.inside_brackets: split_funcs = [delimiter_split, standalone_comment_split, rhs] else: split_funcs = [rhs] for split_func in split_funcs: # We are accumulating lines in `result` because we might want to abort # mission and return the original line in the end, or attempt a different # split altogether. result: List[Line] = [] try: for l in split_func(line, features): if str(l).strip("\n") == line_str: raise CannotSplit("Split function returned an unchanged result") result.extend( split_line( l, line_length=line_length, inner=True, features=features ) ) except CannotSplit: continue else: yield from result break else: yield line def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: tail_leaves: List[Leaf] = [] body_leaves: List[Leaf] = [] head_leaves: List[Leaf] = [] current_leaves = head_leaves matching_bracket: Optional[Leaf] = None for leaf in line.leaves: if ( current_leaves is body_leaves and leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is matching_bracket ): current_leaves = tail_leaves if body_leaves else head_leaves current_leaves.append(leaf) if current_leaves is head_leaves: if leaf.type in OPENING_BRACKETS: matching_bracket = leaf current_leaves = body_leaves if not matching_bracket: raise CannotSplit("No brackets found") head = bracket_split_build_line(head_leaves, line, matching_bracket) body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) tail = 
bracket_split_build_line(tail_leaves, line, matching_bracket) bracket_split_succeeded_or_raise(head, body, tail) for result in (head, body, tail): if result: yield result def right_hand_split( line: Line, line_length: int, features: Collection[Feature] = (), omit: Collection[LeafID] = (), ) -> Iterator[Line]: tail_leaves: List[Leaf] = [] body_leaves: List[Leaf] = [] head_leaves: List[Leaf] = [] current_leaves = tail_leaves opening_bracket: Optional[Leaf] = None closing_bracket: Optional[Leaf] = None for leaf in reversed(line.leaves): if current_leaves is body_leaves: if leaf is opening_bracket: current_leaves = head_leaves if body_leaves else tail_leaves current_leaves.append(leaf) if current_leaves is tail_leaves: if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: opening_bracket = leaf.opening_bracket closing_bracket = leaf current_leaves = body_leaves if not (opening_bracket and closing_bracket and head_leaves): # If there is no opening or closing_bracket that means the split failed and # all content is in the tail. Otherwise, if `head_leaves` are empty, it means # the matching `opening_bracket` wasn't available on `line` anymore. 
raise CannotSplit("No brackets found") tail_leaves.reverse() body_leaves.reverse() head_leaves.reverse() head = bracket_split_build_line(head_leaves, line, opening_bracket) body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) tail = bracket_split_build_line(tail_leaves, line, opening_bracket) bracket_split_succeeded_or_raise(head, body, tail) if ( # the body shouldn't be exploded not body.should_explode # the opening bracket is an optional paren and opening_bracket.type == token.LPAR and not opening_bracket.value # the closing bracket is an optional paren and closing_bracket.type == token.RPAR and not closing_bracket.value # it's not an import (optional parens are the only thing we can split on # in this case; attempting a split without them is a waste of time) and not line.is_import # there are no standalone comments in the body and not body.contains_standalone_comments(0) # and we can actually remove the parens and can_omit_invisible_parens(body, line_length) ): omit = {id(closing_bracket), *omit} try: yield from right_hand_split(line, line_length, features=features, omit=omit) return except CannotSplit: if not ( can_be_split(body) or is_line_short_enough(body, line_length=line_length) ): raise CannotSplit( "Splitting failed, body is still too long and can't be split." ) elif head.contains_multiline_strings() or tail.contains_multiline_strings(): raise CannotSplit( "The current optional pair of parentheses is bound to fail to " "satisfy the splitting algorithm because the head or the tail " "contains multiline strings which by definition never fit one " "line." 
) ensure_visible(opening_bracket) ensure_visible(closing_bracket) for result in (head, body, tail): if result: yield result def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: tail_len = len(str(tail).strip()) if not body: if tail_len == 0: raise CannotSplit("Splitting brackets produced the same line") elif tail_len < 3: raise CannotSplit( f"Splitting brackets on an empty body to save " f"{tail_len} characters is not worth it" ) def bracket_split_build_line( leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False ) -> Line: result = Line(depth=original.depth) if is_body: result.inside_brackets = True result.depth += 1 if leaves: # Since body is a new indent level, remove spurious leading whitespace. normalize_prefix(leaves[0], inside_brackets=True) # Ensure a trailing comma for imports and standalone function arguments, but # be careful not to add one after any comments or within type annotations. no_commas = ( original.is_def and opening_bracket.value == "(" and not any(l.type == token.COMMA for l in leaves) ) if original.is_import or no_commas: for i in range(len(leaves) - 1, -1, -1): if leaves[i].type == STANDALONE_COMMENT: continue if leaves[i].type != token.COMMA: leaves.insert(i + 1, Leaf(token.COMMA, ",")) break # Populate the line for leaf in leaves: result.append(leaf, preformatted=True) for comment_after in original.comments_after(leaf): result.append(comment_after, preformatted=True) if is_body: result.should_explode = should_explode(result, opening_bracket) return result def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc: @wraps(split_func) def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: for l in split_func(line, features): normalize_prefix(l.leaves[0], inside_brackets=True) yield l return split_wrapper @dont_increase_indentation def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: try: last_leaf = 
line.leaves[-1] except IndexError: raise CannotSplit("Line empty") bt = line.bracket_tracker try: delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) except ValueError: raise CannotSplit("No delimiters found") if delimiter_priority == DOT_PRIORITY: if bt.delimiter_count_with_priority(delimiter_priority) == 1: raise CannotSplit("Splitting a single attribute from its owner looks wrong") current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) lowest_depth = sys.maxsize trailing_comma_safe = True def append_to_line(leaf: Leaf) -> Iterator[Line]: nonlocal current_line try: current_line.append_safe(leaf, preformatted=True) except ValueError: yield current_line current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) current_line.append(leaf) for leaf in line.leaves: yield from append_to_line(leaf) for comment_after in line.comments_after(leaf): yield from append_to_line(comment_after) lowest_depth = min(lowest_depth, leaf.bracket_depth) if leaf.bracket_depth == lowest_depth: if is_vararg(leaf, within={syms.typedargslist}): trailing_comma_safe = ( trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features ) elif is_vararg(leaf, within={syms.arglist, syms.argument}): trailing_comma_safe = ( trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features ) leaf_priority = bt.delimiters.get(id(leaf)) if leaf_priority == delimiter_priority: yield current_line current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) if current_line: if ( trailing_comma_safe and delimiter_priority == COMMA_PRIORITY and current_line.leaves[-1].type != token.COMMA and current_line.leaves[-1].type != STANDALONE_COMMENT ): current_line.append(Leaf(token.COMMA, ",")) yield current_line @dont_increase_indentation def standalone_comment_split( line: Line, features: Collection[Feature] = () ) -> Iterator[Line]: if not line.contains_standalone_comments(0): raise CannotSplit("Line does not have any standalone 
comments") current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) def append_to_line(leaf: Leaf) -> Iterator[Line]: nonlocal current_line try: current_line.append_safe(leaf, preformatted=True) except ValueError: yield current_line current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) current_line.append(leaf) for leaf in line.leaves: yield from append_to_line(leaf) for comment_after in line.comments_after(leaf): yield from append_to_line(comment_after) if current_line: yield current_line def is_import(leaf: Leaf) -> bool: p = leaf.parent t = leaf.type v = leaf.value return bool( t == token.NAME and ( (v == "import" and p and p.type == syms.import_name) or (v == "from" and p and p.type == syms.import_from) ) ) def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: t = leaf.type v = leaf.value return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: if not inside_brackets: spl = leaf.prefix.split("#") if "\\" not in spl[0]: nl_count = spl[-1].count("\n") if len(spl) > 1: nl_count -= 1 leaf.prefix = "\n" * nl_count return leaf.prefix = "" def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None: match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL) assert match is not None, f"failed to match string {leaf.value!r}" orig_prefix = match.group(1) new_prefix = orig_prefix.lower() if remove_u_prefix: new_prefix = new_prefix.replace("u", "") leaf.value = f"{new_prefix}{match.group(2)}" def normalize_string_quotes(leaf: Leaf) -> None: value = leaf.value.lstrip("furbFURB") if value[:3] == '"""': return elif value[:3] == "'''": orig_quote = "'''" new_quote = '"""' elif value[0] == '"': orig_quote = '"' new_quote = "'" else: orig_quote = "'" new_quote = '"' first_quote_pos = leaf.value.find(orig_quote) if first_quote_pos == -1: return # There's an internal error prefix = leaf.value[:first_quote_pos] 
unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)] if "r" in prefix.casefold(): if unescaped_new_quote.search(body): # There's at least one unescaped new_quote in this raw string # so converting is impossible return # Do not introduce or remove backslashes in raw strings new_body = body else: # remove unnecessary escapes new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) if body != new_body: # Consider the string without unnecessary escapes as the original body = new_body leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}" new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) if "f" in prefix.casefold(): matches = re.findall( r""" (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { ([^{].*?) # contents of the brackets except if begins with {{ \}(?:[^}]|$) # A } followed by end of the string or a non-} """, new_body, re.VERBOSE, ) for m in matches: if "\\" in str(m): # Do not introduce backslashes in interpolated expressions return if new_quote == '"""' and new_body[-1:] == '"': # edge case: new_body = new_body[:-1] + '\\"' orig_escape_count = body.count("\\") new_escape_count = new_body.count("\\") if new_escape_count > orig_escape_count: return # Do not introduce more escaping if new_escape_count == orig_escape_count and orig_quote == '"': return # Prefer double quotes leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}" def normalize_numeric_literal(leaf: Leaf) -> None: text = leaf.value.lower() if text.startswith(("0o", "0b")): # Leave octal and binary literals alone. pass elif text.startswith("0x"): # Change hex literals to upper case. 
        before, after = text[:2], text[2:]
        text = f"{before}{after.upper()}"
    elif "e" in text:
        # Scientific notation: normalize the mantissa and drop an explicit "+"
        # on the exponent while keeping "-".
        before, after = text.split("e")
        sign = ""
        if after.startswith("-"):
            after = after[1:]
            sign = "-"
        elif after.startswith("+"):
            after = after[1:]
        before = format_float_or_int_string(before)
        text = f"{before}e{sign}{after}"
    elif text.endswith(("j", "l")):
        number = text[:-1]
        suffix = text[-1]
        # Capitalize in "2L" because "l" looks too similar to "1".
        if suffix == "l":
            suffix = "L"
        text = f"{format_float_or_int_string(number)}{suffix}"
    else:
        text = format_float_or_int_string(text)
    leaf.value = text


def format_float_or_int_string(text: str) -> str:
    """Format a float string like "1.0"; leave plain integers untouched.

    A missing integer or fractional part is filled in with 0, so ".5"
    becomes "0.5" and "1." becomes "1.0".
    """
    if "." not in text:
        return text

    before, after = text.split(".")
    return f"{before or 0}.{after or 0}"


def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
    """Make existing optional parentheses invisible or create new ones.

    `parens_after` is a set of string values of leaves right after which
    invisible parens should be put.  Does nothing when the node's prefix
    carries a `# fmt: off` comment.
    """
    for pc in list_comments(node.prefix, is_endmarker=False):
        if pc.value in FMT_OFF:
            # This `node` has a prefix with `# fmt: off`, don't mess with parens.
            return
    check_lpar = False
    for index, child in enumerate(list(node.children)):
        # Add parentheses around long tuple unpacking in assignments.
        if (
            index == 0
            and isinstance(child, Node)
            and child.type == syms.testlist_star_expr
        ):
            check_lpar = True

        if check_lpar:
            if is_walrus_assignment(child):
                # Never touch the parens around `(name := expr)`.
                continue
            if child.type == syms.atom:
                if maybe_make_parens_invisible_in_atom(child, parent=node):
                    wrap_in_parentheses(node, child, visible=False)
            elif is_one_tuple(child):
                # One-element tuples get *visible* parens for clarity.
                wrap_in_parentheses(node, child, visible=True)
            elif node.type == syms.import_from:
                # "import from" nodes store parentheses directly as part of
                # the statement
                if child.type == token.LPAR:
                    # make parentheses invisible
                    child.value = ""  # type: ignore
                    node.children[-1].value = ""  # type: ignore
                elif child.type != token.STAR:
                    # insert invisible parentheses
                    node.insert_child(index, Leaf(token.LPAR, ""))
                    node.append_child(Leaf(token.RPAR, ""))
                break

            elif not (isinstance(child, Leaf) and is_multiline_string(child)):
                wrap_in_parentheses(node, child, visible=False)

        check_lpar = isinstance(child, Leaf) and child.value in parens_after


def normalize_fmt_off(node: Node) -> None:
    """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.

    Repeats until `convert_one_fmt_off_pair` finds no more pairs to convert.
    """
    try_again = True
    while try_again:
        try_again = convert_one_fmt_off_pair(node)


def convert_one_fmt_off_pair(node: Node) -> bool:
    """Hide one `# fmt: off` region inside a single STANDALONE_COMMENT leaf.

    The ignored nodes' original text is preserved verbatim inside that leaf so
    the formatter will not touch it.  Returns True if a region was converted.
    """
    for leaf in node.leaves():
        previous_consumed = 0
        for comment in list_comments(leaf.prefix, is_endmarker=False):
            if comment.value in FMT_OFF:
                # We only want standalone comments. If there's no previous leaf or
                # the previous leaf is indentation, it's a standalone comment in
                # disguise.
                if comment.type != STANDALONE_COMMENT:
                    prev = preceding_leaf(leaf)
                    if prev and prev.type not in WHITESPACE:
                        continue

                ignored_nodes = list(generate_ignored_nodes(leaf))
                if not ignored_nodes:
                    continue

                first = ignored_nodes[0]  # Can be a container node with the `leaf`.
                parent = first.parent
                prefix = first.prefix
                first.prefix = prefix[comment.consumed :]
                hidden_value = (
                    comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
                )
                if hidden_value.endswith("\n"):
                    # That happens when one of the `ignored_nodes` ended with a NEWLINE
                    # leaf (possibly followed by a DEDENT).
hidden_value = hidden_value[:-1] first_idx: Optional[int] = None for ignored in ignored_nodes: index = ignored.remove() if first_idx is None: first_idx = index assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" parent.insert_child( first_idx, Leaf( STANDALONE_COMMENT, hidden_value, prefix=prefix[:previous_consumed] + "\n" * comment.newlines, ), ) return True previous_consumed = comment.consumed return False def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]: container: Optional[LN] = container_of(leaf) while container is not None and container.type != token.ENDMARKER: is_fmt_on = False for comment in list_comments(container.prefix, is_endmarker=False): if comment.value in FMT_ON: is_fmt_on = True elif comment.value in FMT_OFF: is_fmt_on = False if is_fmt_on: return yield container container = container.next_sibling def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: if ( node.type != syms.atom or is_empty_tuple(node) or is_one_tuple(node) or (is_yield(node) and parent.type != syms.expr_stmt) or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY ): return False first = node.children[0] last = node.children[-1] if first.type == token.LPAR and last.type == token.RPAR: middle = node.children[1] # make parentheses invisible first.value = "" # type: ignore last.value = "" # type: ignore maybe_make_parens_invisible_in_atom(middle, parent=parent) if is_atom_with_invisible_parens(middle): # Strip the invisible parens from `middle` by replacing # it with the child in-between the invisible parens middle.replace(middle.children[1]) return False return True def is_atom_with_invisible_parens(node: LN) -> bool: if isinstance(node, Leaf) or node.type != syms.atom: return False first, last = node.children[0], node.children[-1] return ( isinstance(first, Leaf) and first.type == token.LPAR and first.value == "" and isinstance(last, Leaf) and last.type == token.RPAR 
and last.value == "" ) def is_empty_tuple(node: LN) -> bool: return ( node.type == syms.atom and len(node.children) == 2 and node.children[0].type == token.LPAR and node.children[1].type == token.RPAR ) def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: if len(node.children) != 3: return None lpar, wrapped, rpar = node.children if not (lpar.type == token.LPAR and rpar.type == token.RPAR): return None return wrapped def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: lpar = Leaf(token.LPAR, "(" if visible else "") rpar = Leaf(token.RPAR, ")" if visible else "") prefix = child.prefix child.prefix = "" index = child.remove() or 0 new_child = Node(syms.atom, [lpar, child, rpar]) new_child.prefix = prefix parent.insert_child(index, new_child) def is_one_tuple(node: LN) -> bool: if node.type == syms.atom: gexp = unwrap_singleton_parenthesis(node) if gexp is None or gexp.type != syms.testlist_gexp: return False return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA return ( node.type in IMPLICIT_TUPLE and len(node.children) == 2 and node.children[1].type == token.COMMA ) def is_walrus_assignment(node: LN) -> bool: inner = unwrap_singleton_parenthesis(node) return inner is not None and inner.type == syms.namedexpr_test def is_yield(node: LN) -> bool: if node.type == syms.yield_expr: return True if node.type == token.NAME and node.value == "yield": # type: ignore return True if node.type != syms.atom: return False if len(node.children) != 3: return False lpar, expr, rpar = node.children if lpar.type == token.LPAR and rpar.type == token.RPAR: return is_yield(expr) return False def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: if leaf.type not in VARARGS_SPECIALS or not leaf.parent: return False p = leaf.parent if p.type == syms.star_expr: # Star expressions are also used as assignment targets in extended # iterable unpacking (PEP 3132). See what its parent is instead. 
if not p.parent: return False p = p.parent return p.type in within def is_multiline_string(leaf: Leaf) -> bool: value = leaf.value.lstrip("furbFURB") return value[:3] in {'"""', "'''"} and "\n" in value def is_stub_suite(node: Node) -> bool: if ( len(node.children) != 4 or node.children[0].type != token.NEWLINE or node.children[1].type != token.INDENT or node.children[3].type != token.DEDENT ): return False return is_stub_body(node.children[2]) def is_stub_body(node: LN) -> bool: if not isinstance(node, Node) or node.type != syms.simple_stmt: return False if len(node.children) != 2: return False child = node.children[0] return ( child.type == syms.atom and len(child.children) == 3 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) ) def max_delimiter_priority_in_atom(node: LN) -> Priority: if node.type != syms.atom: return 0 first = node.children[0] last = node.children[-1] if not (first.type == token.LPAR and last.type == token.RPAR): return 0 bt = BracketTracker() for c in node.children[1:-1]: if isinstance(c, Leaf): bt.mark(c) else: for leaf in c.leaves(): bt.mark(leaf) try: return bt.max_delimiter_priority() except ValueError: return 0 def ensure_visible(leaf: Leaf) -> None: if leaf.type == token.LPAR: leaf.value = "(" elif leaf.type == token.RPAR: leaf.value = ")" def should_explode(line: Line, opening_bracket: Leaf) -> bool: if not ( opening_bracket.parent and opening_bracket.parent.type in {syms.atom, syms.import_from} and opening_bracket.value in "[{(" ): return False try: last_leaf = line.leaves[-1] exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set() max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) except (IndexError, ValueError): return False return max_priority == COMMA_PRIORITY def get_features_used(node: Node) -> Set[Feature]: features: Set[Feature] = set() for n in node.pre_order(): if n.type == token.STRING: value_head = n.value[:2] # type: ignore if value_head in {'f"', 'F"', "f'", "F'", 
"rf", "fr", "RF", "FR"}: features.add(Feature.F_STRINGS) elif n.type == token.NUMBER: if "_" in n.value: # type: ignore features.add(Feature.NUMERIC_UNDERSCORES) elif n.type == token.SLASH: if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}: features.add(Feature.POS_ONLY_ARGUMENTS) elif n.type == token.COLONEQUAL: features.add(Feature.ASSIGNMENT_EXPRESSIONS) elif ( n.type in {syms.typedargslist, syms.arglist} and n.children and n.children[-1].type == token.COMMA ): if n.type == syms.typedargslist: feature = Feature.TRAILING_COMMA_IN_DEF else: feature = Feature.TRAILING_COMMA_IN_CALL for ch in n.children: if ch.type in STARS: features.add(feature) if ch.type == syms.argument: for argch in ch.children: if argch.type in STARS: features.add(feature) return features def detect_target_versions(node: Node) -> Set[TargetVersion]: features = get_features_used(node) return { version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] } def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: omit: Set[LeafID] = set() yield omit length = 4 * line.depth opening_bracket: Optional[Leaf] = None closing_bracket: Optional[Leaf] = None inner_brackets: Set[LeafID] = set() for index, leaf, leaf_length in enumerate_with_length(line, reversed=True): length += leaf_length if length > line_length: break has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) if leaf.type == STANDALONE_COMMENT or has_inline_comment: break if opening_bracket: if leaf is opening_bracket: opening_bracket = None elif leaf.type in CLOSING_BRACKETS: inner_brackets.add(id(leaf)) elif leaf.type in CLOSING_BRACKETS: if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS: # Empty brackets would fail a split so treat them as "inner" # brackets (e.g. only add them to the `omit` set if another # pair of brackets was good enough. 
inner_brackets.add(id(leaf)) continue if closing_bracket: omit.add(id(closing_bracket)) omit.update(inner_brackets) inner_brackets.clear() yield omit if leaf.value: opening_bracket = leaf.opening_bracket closing_bracket = leaf def get_future_imports(node: Node) -> Set[str]: imports: Set[str] = set() def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]: for child in children: if isinstance(child, Leaf): if child.type == token.NAME: yield child.value elif child.type == syms.import_as_name: orig_name = child.children[0] assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports" assert orig_name.type == token.NAME, "Invalid syntax parsing imports" yield orig_name.value elif child.type == syms.import_as_names: yield from get_imports_from_children(child.children) else: raise AssertionError("Invalid syntax parsing imports") for child in node.children: if child.type != syms.simple_stmt: break first_child = child.children[0] if isinstance(first_child, Leaf): # Continue looking if we see a docstring; otherwise stop. 
if ( len(child.children) == 2 and first_child.type == token.STRING and child.children[1].type == token.NEWLINE ): continue break elif first_child.type == syms.import_from: module_name = first_child.children[1] if not isinstance(module_name, Leaf) or module_name.value != "__future__": break imports |= set(get_imports_from_children(first_child.children[3:])) else: break return imports @lru_cache() def get_gitignore(root: Path) -> PathSpec: gitignore = root / ".gitignore" lines: List[str] = [] if gitignore.is_file(): with gitignore.open() as gf: lines = gf.readlines() return PathSpec.from_lines("gitwildmatch", lines) def gen_python_files_in_dir( path: Path, root: Path, include: Pattern[str], exclude: Pattern[str], report: "Report", gitignore: PathSpec, ) -> Iterator[Path]: assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" for child in path.iterdir(): # First ignore files matching .gitignore if gitignore.match_file(child.as_posix()): report.path_ignored(child, f"matches the .gitignore file content") continue # Then ignore with `exclude` option. 
try: normalized_path = "/" + child.resolve().relative_to(root).as_posix() except OSError as e: report.path_ignored(child, f"cannot be read because {e}") continue except ValueError: if child.is_symlink(): report.path_ignored( child, f"is a symbolic link that points outside {root}" ) continue raise if child.is_dir(): normalized_path += "/" exclude_match = exclude.search(normalized_path) if exclude_match and exclude_match.group(0): report.path_ignored(child, f"matches the --exclude regular expression") continue if child.is_dir(): yield from gen_python_files_in_dir( child, root, include, exclude, report, gitignore ) elif child.is_file(): include_match = include.search(normalized_path) if include_match: yield child @lru_cache() def find_project_root(srcs: Iterable[str]) -> Path: if not srcs: return Path("/").resolve() common_base = min(Path(src).resolve() for src in srcs) if common_base.is_dir(): # Append a fake file so `parents` below returns `common_base_dir`, too. common_base /= "fake-file" for directory in common_base.parents: if (directory / ".git").is_dir(): return directory if (directory / ".hg").is_dir(): return directory if (directory / "pyproject.toml").is_file(): return directory return directory @dataclass class Report: check: bool = False quiet: bool = False verbose: bool = False change_count: int = 0 same_count: int = 0 failure_count: int = 0 def done(self, src: Path, changed: Changed) -> None: if changed is Changed.YES: reformatted = "would reformat" if self.check else "reformatted" if self.verbose or not self.quiet: out(f"{reformatted} {src}") self.change_count += 1 else: if self.verbose: if changed is Changed.NO: msg = f"{src} already well formatted, good job." else: msg = f"{src} wasn't modified on disk since last run." 
out(msg, bold=False) self.same_count += 1 def failed(self, src: Path, message: str) -> None: err(f"error: cannot format {src}: {message}") self.failure_count += 1 def path_ignored(self, path: Path, message: str) -> None: if self.verbose: out(f"{path} ignored: {message}", bold=False) @property def return_code(self) -> int: # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with # 126 we have special return codes reserved by the shell. if self.failure_count: return 123 elif self.change_count and self.check: return 1 return 0 def __str__(self) -> str: if self.check: reformatted = "would be reformatted" unchanged = "would be left unchanged" failed = "would fail to reformat" else: reformatted = "reformatted" unchanged = "left unchanged" failed = "failed to reformat" report = [] if self.change_count: s = "s" if self.change_count > 1 else "" report.append( click.style(f"{self.change_count} file{s} {reformatted}", bold=True) ) if self.same_count: s = "s" if self.same_count > 1 else "" report.append(f"{self.same_count} file{s} {unchanged}") if self.failure_count: s = "s" if self.failure_count > 1 else "" report.append( click.style(f"{self.failure_count} file{s} {failed}", fg="red") ) return ", ".join(report) + "." 
def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]: filename = "<unknown>" if sys.version_info >= (3, 8): # TODO: support Python 4+ ;) for minor_version in range(sys.version_info[1], 4, -1): try: return ast.parse(src, filename, feature_version=(3, minor_version)) except SyntaxError: continue else: for feature_version in (7, 6): try: return ast3.parse(src, filename, feature_version=feature_version) except SyntaxError: continue return ast27.parse(src) def _fixup_ast_constants( node: Union[ast.AST, ast3.AST, ast27.AST] ) -> Union[ast.AST, ast3.AST, ast27.AST]: if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)): return ast.Constant(value=node.s) if isinstance(node, (ast.Num, ast3.Num, ast27.Num)): return ast.Constant(value=node.n) if isinstance(node, (ast.NameConstant, ast3.NameConstant)): return ast.Constant(value=node.value) return node def assert_equivalent(src: str, dst: str) -> None: def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]: node = _fixup_ast_constants(node) yield f"{' ' * depth}{node.__class__.__name__}(" for field in sorted(node._fields): # noqa: F402 # TypeIgnore has only one field 'lineno' which breaks this comparison type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore) if sys.version_info >= (3, 8): type_ignore_classes += (ast.TypeIgnore,) if isinstance(node, type_ignore_classes): break try: value = getattr(node, field) except AttributeError: continue yield f"{' ' * (depth+1)}{field}=" if isinstance(value, list): for item in value: # Ignore nested tuples within del statements, because we may insert # parentheses and they change the AST. 
if ( field == "targets" and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete)) and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple)) ): for item in item.elts: yield from _v(item, depth + 2) elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)): yield from _v(item, depth + 2) elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)): yield from _v(value, depth + 2) else: yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}" yield f"{' ' * depth}) # /{node.__class__.__name__}" try: src_ast = parse_ast(src) except Exception as exc: raise AssertionError( f"cannot use --safe with this file; failed to parse source file. " f"AST error message: {exc}" ) try: dst_ast = parse_ast(dst) except Exception as exc: log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) raise AssertionError( f"INTERNAL ERROR: Black produced invalid code: {exc}. " f"Please report a bug on https://github.com/psf/black/issues. " f"This invalid output might be helpful: {log}" ) from None src_ast_str = "\n".join(_v(src_ast)) dst_ast_str = "\n".join(_v(dst_ast)) if src_ast_str != dst_ast_str: log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) raise AssertionError( f"INTERNAL ERROR: Black produced code that is not equivalent to " f"the source. " f"Please report a bug on https://github.com/psf/black/issues. " f"This diff might be helpful: {log}" ) from None def assert_stable(src: str, dst: str, mode: FileMode) -> None: newdst = format_str(dst, mode=mode) if dst != newdst: log = dump_to_file( diff(src, dst, "source", "first pass"), diff(dst, newdst, "first pass", "second pass"), ) raise AssertionError( f"INTERNAL ERROR: Black produced different code on the second pass " f"of the formatter. " f"Please report a bug on https://github.com/psf/black/issues. 
" f"This diff might be helpful: {log}" ) from None @mypyc_attr(patchable=True) def dump_to_file(*output: str) -> str: with tempfile.NamedTemporaryFile( mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" ) as f: for lines in output: f.write(lines) if lines and lines[-1] != "\n": f.write("\n") return f.name @contextmanager def nullcontext() -> Iterator[None]: yield def diff(a: str, b: str, a_name: str, b_name: str) -> str: import difflib a_lines = [line + "\n" for line in a.split("\n")] b_lines = [line + "\n" for line in b.split("\n")] return "".join( difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5) ) def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: err("Aborted!") for task in tasks: task.cancel() def shutdown(loop: asyncio.AbstractEventLoop) -> None: try: if sys.version_info[:2] >= (3, 7): all_tasks = asyncio.all_tasks else: all_tasks = asyncio.Task.all_tasks # This part is borrowed from asyncio/runners.py in Python 3.7b2. to_cancel = [task for task in all_tasks(loop) if not task.done()] if not to_cancel: return for task in to_cancel: task.cancel() loop.run_until_complete( asyncio.gather(*to_cancel, loop=loop, return_exceptions=True) ) finally: # `concurrent.futures.Future` objects cannot be cancelled once they # are already running. There might be some when the `shutdown()` happened. # Silence their logger's spew about the event loop being closed. 
cf_logger = logging.getLogger("concurrent.futures") cf_logger.setLevel(logging.CRITICAL) loop.close() def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: return regex.sub(replacement, regex.sub(replacement, original)) def re_compile_maybe_verbose(regex: str) -> Pattern[str]: if "\n" in regex: regex = "(?x)" + regex compiled: Pattern[str] = re.compile(regex) return compiled def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: index = len(sequence) - 1 for element in reversed(sequence): yield (index, element) index -= 1 def enumerate_with_length( line: Line, reversed: bool = False ) -> Iterator[Tuple[Index, Leaf, int]]: op = cast( Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], enumerate_reversed if reversed else enumerate, ) for index, leaf in op(line.leaves): length = len(leaf.prefix) + len(leaf.value) if "\n" in leaf.value: return # Multiline strings, we can't continue. for comment in line.comments_after(leaf): length += len(comment.value) yield index, leaf, length def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: if not line_str: line_str = str(line).strip("\n") return ( len(line_str) <= line_length and "\n" not in line_str # multiline strings and not line.contains_standalone_comments() ) def can_be_split(line: Line) -> bool: leaves = line.leaves if len(leaves) < 2: return False if leaves[0].type == token.STRING and leaves[1].type == token.DOT: call_count = 0 dot_count = 0 next = leaves[-1] for leaf in leaves[-2::-1]: if leaf.type in OPENING_BRACKETS: if next.type not in CLOSING_BRACKETS: return False call_count += 1 elif leaf.type == token.DOT: dot_count += 1 elif leaf.type == token.NAME: if not (next.type == token.DOT or next.type in OPENING_BRACKETS): return False elif leaf.type not in CLOSING_BRACKETS: return False if dot_count > 1 and call_count > 1: return False return True def can_omit_invisible_parens(line: Line, line_length: int) -> bool: bt = 
line.bracket_tracker if not bt.delimiters: # Without delimiters the optional parentheses are useless. return True max_priority = bt.max_delimiter_priority() if bt.delimiter_count_with_priority(max_priority) > 1: # With more than one delimiter of a kind the optional parentheses read better. return False if max_priority == DOT_PRIORITY: # A single stranded method call doesn't require optional parentheses. return True assert len(line.leaves) >= 2, "Stranded delimiter" first = line.leaves[0] second = line.leaves[1] penultimate = line.leaves[-2] last = line.leaves[-1] # With a single delimiter, omit if the expression starts or ends with # a bracket. if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: remainder = False length = 4 * line.depth for _index, leaf, leaf_length in enumerate_with_length(line): if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: remainder = True if remainder: length += leaf_length if length > line_length: break if leaf.type in OPENING_BRACKETS: # There are brackets we can further split on. remainder = False else: # checked the entire string and line length wasn't exceeded if len(line.leaves) == _index + 1: return True # Note: we are not returning False here because a line might have *both* # a leading opening bracket and a trailing closing bracket. If the # opening bracket doesn't match our rule, maybe the closing will. if ( last.type == token.RPAR or last.type == token.RBRACE or ( # don't use indexing for omitting optional parentheses; # it looks weird last.type == token.RSQB and last.parent and last.parent.type != syms.trailer ) ): if penultimate.type in OPENING_BRACKETS: # Empty brackets don't help. return False if is_multiline_string(first): # Additional wrapping of a multiline string in this situation is # unnecessary. 
return True length = 4 * line.depth seen_other_brackets = False for _index, leaf, leaf_length in enumerate_with_length(line): length += leaf_length if leaf is last.opening_bracket: if seen_other_brackets or length <= line_length: return True elif leaf.type in OPENING_BRACKETS: # There are brackets we can further split on. seen_other_brackets = True return False def get_cache_file(mode: FileMode) -> Path: return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" def read_cache(mode: FileMode) -> Cache: cache_file = get_cache_file(mode) if not cache_file.exists(): return {} with cache_file.open("rb") as fobj: try: cache: Cache = pickle.load(fobj) except (pickle.UnpicklingError, ValueError): return {} return cache def get_cache_info(path: Path) -> CacheInfo: stat = path.stat() return stat.st_mtime, stat.st_size def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: todo, done = set(), set() for src in sources: src = src.resolve() if cache.get(src) != get_cache_info(src): todo.add(src) else: done.add(src) return todo, done def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None: cache_file = get_cache_file(mode) try: CACHE_DIR.mkdir(parents=True, exist_ok=True) new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}} with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: pickle.dump(new_cache, f, protocol=4) os.replace(f.name, cache_file) except OSError: pass def patch_click() -> None: try: from click import core from click import _unicodefun # type: ignore except ModuleNotFoundError: return for module in (core, _unicodefun): if hasattr(module, "_verify_python3_env"): module._verify_python3_env = lambda: None def patched_main() -> None: freeze_support() patch_click() main() if __name__ == "__main__": patched_main()
true
true
1c3566a8da172b2be984d3cd544181a39dcaeebe
4,256
py
Python
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/express_route_service_providers_operations.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/express_route_service_providers_operations.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/express_route_service_providers_operations.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2018-08-28T14:36:47.000Z
2018-08-28T14:36:47.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from .. import models class ExpressRouteServiceProvidersOperations(object): """ExpressRouteServiceProvidersOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client API version. Constant value: "2017-11-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-11-01" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): """Gets all the available express route service providers. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of ExpressRouteServiceProvider :rtype: ~azure.mgmt.network.v2017_11_01.models.ExpressRouteServiceProviderPaged[~azure.mgmt.network.v2017_11_01.models.ExpressRouteServiceProvider] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}
41.320388
148
0.645207
import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from .. import models class ExpressRouteServiceProvidersOperations(object): models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-11-01" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): def internal_paging(next_link=None, raw=False): if not next_link: url = self.list.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response deserialized = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.ExpressRouteServiceProviderPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': 
'/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}
true
true
1c3567fa76301f500220e63d1bfa3d3d32bc4fe7
797
py
Python
build/common.py
iguoyr/chaos-mesh
13590d3494d7686a43ae5d52c2902fcbb7acb81a
[ "Apache-2.0" ]
null
null
null
build/common.py
iguoyr/chaos-mesh
13590d3494d7686a43ae5d52c2902fcbb7acb81a
[ "Apache-2.0" ]
null
null
null
build/common.py
iguoyr/chaos-mesh
13590d3494d7686a43ae5d52c2902fcbb7acb81a
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Chaos Mesh Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ varaibles used commonly in multiple scripts """ export_env_variables = [ "HTTP_PROXY", "HTTPS_PROXY", "GOPROXY", "UI", "LDFLAGS", "CRATES_MIRROR", "GO_BUILD_CACHE", "LDFLAGS", ]
26.566667
74
0.720201
export_env_variables = [ "HTTP_PROXY", "HTTPS_PROXY", "GOPROXY", "UI", "LDFLAGS", "CRATES_MIRROR", "GO_BUILD_CACHE", "LDFLAGS", ]
true
true
1c356b10ccbb315212dfd39936b221fd2442f8f8
1,306
py
Python
aiotdlib/api/functions/get_chat_notification_settings_exceptions.py
jraylan/aiotdlib
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
[ "MIT" ]
37
2021-05-04T10:41:41.000Z
2022-03-30T13:48:05.000Z
aiotdlib/api/functions/get_chat_notification_settings_exceptions.py
jraylan/aiotdlib
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
[ "MIT" ]
13
2021-07-17T19:54:51.000Z
2022-02-26T06:50:00.000Z
aiotdlib/api/functions/get_chat_notification_settings_exceptions.py
jraylan/aiotdlib
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
[ "MIT" ]
7
2021-09-22T21:27:11.000Z
2022-02-20T02:33:19.000Z
# =============================================================================== # # # # This file has been generated automatically!! Do not change this manually! # # # # =============================================================================== # from __future__ import annotations from pydantic import Field from ..base_object import BaseObject from ..types import NotificationSettingsScope class GetChatNotificationSettingsExceptions(BaseObject): """ Returns list of chats with non-default notification settings :param scope: If specified, only chats from the scope will be returned; pass null to return chats from all scopes :type scope: :class:`NotificationSettingsScope` :param compare_sound: If true, also chats with non-default sound will be returned :type compare_sound: :class:`bool` """ ID: str = Field("getChatNotificationSettingsExceptions", alias="@type") scope: NotificationSettingsScope compare_sound: bool @staticmethod def read(q: dict) -> GetChatNotificationSettingsExceptions: return GetChatNotificationSettingsExceptions.construct(**q)
39.575758
117
0.556662
from __future__ import annotations from pydantic import Field from ..base_object import BaseObject from ..types import NotificationSettingsScope class GetChatNotificationSettingsExceptions(BaseObject): ID: str = Field("getChatNotificationSettingsExceptions", alias="@type") scope: NotificationSettingsScope compare_sound: bool @staticmethod def read(q: dict) -> GetChatNotificationSettingsExceptions: return GetChatNotificationSettingsExceptions.construct(**q)
true
true
1c356edfccab0f3d653cbb144107522f8b7422ce
11,954
py
Python
qa/rpc-tests/p2p-acceptblock.py
josephsalimin/dogecoin
0ca38088628e2f9385776fc0ea52234f86a8a5e9
[ "MIT" ]
3
2021-05-15T15:30:40.000Z
2021-05-15T15:30:50.000Z
qa/rpc-tests/p2p-acceptblock.py
josephsalimin/dogecoin
0ca38088628e2f9385776fc0ea52234f86a8a5e9
[ "MIT" ]
1
2021-05-22T13:52:29.000Z
2021-05-22T14:05:23.000Z
qa/rpc-tests/p2p-acceptblock.py
josephsalimin/garudacoin
0ca38088628e2f9385776fc0ea52234f86a8a5e9
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase ''' AcceptBlockTest -- test processing of unrequested blocks. Since behavior differs when receiving unrequested blocks from whitelisted peers versus non-whitelisted peers, this tests the behavior of both (effectively two separate tests running in parallel). Setup: two nodes, node0 and node1, not connected to each other. Node0 does not whitelist localhost, but node1 does. They will each be on their own chain for this test. We have one NodeConn connection to each, test_node and white_node respectively. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance. 3. Mine a block that forks the previous block, and deliver to each node from corresponding peer. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more work than the tip. Node1 should process because this is coming from a whitelisted peer. 4. Send another block that builds on the forking block. Node0 should process this block but be stuck on the shorter chain, because it's missing an intermediate block. Node1 should reorg to this longer chain. 4b.Send 288 more blocks on the longer chain. Node0 should process all but the last block (too far ahead in height). Send all headers to Node1, and then send the last block in that chain. Node1 should accept the block because it's coming from a whitelisted peer. 5. Send a duplicate of the block in #3 to Node0. 
Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. ''' # TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() def add_connection(self, conn): self.connection = conn # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message # Spin until verack message is received from the node. # We use this to signal that our test can begin. This # is called from the testing thread, so it needs to acquire # the global lock. 
def wait_for_verack(self): while True: with mininode_lock: if self.verack_received: return time.sleep(0.05) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): self.connection.send_message(msg_ping(nonce=self.ping_counter)) received_pong = False sleep_time = 0.05 while not received_pong and timeout > 0: time.sleep(sleep_time) timeout -= sleep_time with mininode_lock: if self.last_pong.nonce == self.ping_counter: received_pong = True self.ping_counter += 1 return received_pong class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("DOGECOIND", "garudacoind"), help="garudacoind binary to test") def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"], binary=self.options.testbinary)) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1"], binary=self.options.testbinary)) def run_test(self): # Setup the p2p connections and start up the network thread. 
test_node = TestNode() # connects to node0 (not whitelisted) white_node = TestNode() # connects to node1 (whitelisted) connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) test_node.add_connection(connections[0]) white_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() white_node.wait_for_verack() # 1. Have both nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ] # 2. Send one block that builds on each tip. # This should be accepted. blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) white_node.send_message(msg_block(blocks_h2[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 2) print("First height 2 block accepted by both nodes") # 3. Send another block that builds on the original tip. blocks_h2f = [] # Blocks at height 2 that fork off the main chain for i in range(2): blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1)) blocks_h2f[i].solve() test_node.send_message(msg_block(blocks_h2f[0])) white_node.send_message(msg_block(blocks_h2f[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h2f[0].hash: assert_equal(x['status'], "headers-only") for x in self.nodes[1].getchaintips(): if x['hash'] == blocks_h2f[1].hash: assert_equal(x['status'], "valid-headers") print("Second height 2 block accepted only from whitelisted peer") # 4. 
Now send another block that builds on the forking chain. blocks_h3 = [] for i in range(2): blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1)) blocks_h3[i].solve() test_node.send_message(msg_block(blocks_h3[0])) white_node.send_message(msg_block(blocks_h3[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] # Since the earlier block was not processed by node0, the new block # can't be fully validated. for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h3[0].hash: assert_equal(x['status'], "headers-only") # But this block should be accepted by node0 since it has more work. self.nodes[0].getblock(blocks_h3[0].hash) print("Unrequested more-work block accepted from non-whitelisted peer") # Node1 should have accepted and reorged. assert_equal(self.nodes[1].getblockcount(), 3) print("Successfully reorged to length 3 chain from whitelisted peer") # 4b. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node0. Node1 should process the tip if # we give it the headers chain leading to the tip. 
tips = blocks_h3 headers_message = msg_headers() all_blocks = [] # node0's blocks for j in range(2): for i in range(288): next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1) next_block.solve() if j==0: test_node.send_message(msg_block(next_block)) all_blocks.append(next_block) else: headers_message.headers.append(CBlockHeader(next_block)) tips[j] = next_block time.sleep(2) # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) headers_message.headers.pop() # Ensure the last block is unrequested white_node.send_message(headers_message) # Send headers leading to tip white_node.send_message(msg_block(tips[1])) # Now deliver the tip white_node.sync_with_ping() self.nodes[1].getblock(tips[1].hash) print("Unrequested block far ahead of tip accepted from whitelisted peer") # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). test_node.send_message(msg_block(blocks_h2f[0])) # Here, if the sleep is too short, the test could falsely succeed (if the # node hasn't processed the block by the time the sleep returns, and then # the node processes it and incorrectly advances the tip). # But this would be caught later on, when we verify that an inv triggers # a getdata request for this block. test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) print("Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). 
with mininode_lock: # Clear state so we can check the getdata request test_node.last_getdata = None test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_getdata # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) print("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(blocks_h2f[0])) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) print("Successfully reorged to longer chain from non-whitelisted peer") [ c.disconnect_node() for c in connections ] if __name__ == '__main__': AcceptBlockTest().main()
42.692857
107
0.657771
from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() def add_connection(self, conn): self.connection = conn def on_getdata(self, conn, message): self.last_getdata = message def wait_for_verack(self): while True: with mininode_lock: if self.verack_received: return time.sleep(0.05) def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): self.connection.send_message(msg_ping(nonce=self.ping_counter)) received_pong = False sleep_time = 0.05 while not received_pong and timeout > 0: time.sleep(sleep_time) timeout -= sleep_time with mininode_lock: if self.last_pong.nonce == self.ping_counter: received_pong = True self.ping_counter += 1 return received_pong class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("DOGECOIND", "garudacoind"), help="garudacoind binary to test") def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"], binary=self.options.testbinary)) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1"], binary=self.options.testbinary)) def run_test(self): # Setup the p2p connections and start up the network thread. 
test_node = TestNode() # connects to node0 (not whitelisted) white_node = TestNode() # connects to node1 (whitelisted) connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) test_node.add_connection(connections[0]) white_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() white_node.wait_for_verack() # 1. Have both nodes mine a block (leave IBD) [ n.generate(1) for n in self.nodes ] tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ] # 2. Send one block that builds on each tip. # This should be accepted. blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) white_node.send_message(msg_block(blocks_h2[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 2) print("First height 2 block accepted by both nodes") blocks_h2f = [] for i in range(2): blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1)) blocks_h2f[i].solve() test_node.send_message(msg_block(blocks_h2f[0])) white_node.send_message(msg_block(blocks_h2f[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h2f[0].hash: assert_equal(x['status'], "headers-only") for x in self.nodes[1].getchaintips(): if x['hash'] == blocks_h2f[1].hash: assert_equal(x['status'], "valid-headers") print("Second height 2 block accepted only from whitelisted peer") blocks_h3 = [] for i in range(2): blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), 
blocks_h2f[i].nTime+1)) blocks_h3[i].solve() test_node.send_message(msg_block(blocks_h3[0])) white_node.send_message(msg_block(blocks_h3[1])) [ x.sync_with_ping() for x in [test_node, white_node] ] for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h3[0].hash: assert_equal(x['status'], "headers-only") # But this block should be accepted by node0 since it has more work. self.nodes[0].getblock(blocks_h3[0].hash) print("Unrequested more-work block accepted from non-whitelisted peer") # Node1 should have accepted and reorged. assert_equal(self.nodes[1].getblockcount(), 3) print("Successfully reorged to length 3 chain from whitelisted peer") # 4b. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node0. Node1 should process the tip if # we give it the headers chain leading to the tip. tips = blocks_h3 headers_message = msg_headers() all_blocks = [] # node0's blocks for j in range(2): for i in range(288): next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1) next_block.solve() if j==0: test_node.send_message(msg_block(next_block)) all_blocks.append(next_block) else: headers_message.headers.append(CBlockHeader(next_block)) tips[j] = next_block time.sleep(2) for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) headers_message.headers.pop() # Ensure the last block is unrequested white_node.send_message(headers_message) # Send headers leading to tip white_node.send_message(msg_block(tips[1])) # Now deliver the tip white_node.sync_with_ping() self.nodes[1].getblock(tips[1].hash) print("Unrequested block far ahead of tip accepted from whitelisted peer") # 5. Test handling of unrequested block on the node that didn't process test_node.send_message(msg_block(blocks_h2f[0])) # the node processes it and incorrectly advances the tip). 
# But this would be caught later on, when we verify that an inv triggers # a getdata request for this block. test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) print("Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_getdata = None test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_getdata # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) print("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(blocks_h2f[0])) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) print("Successfully reorged to longer chain from non-whitelisted peer") [ c.disconnect_node() for c in connections ] if __name__ == '__main__': AcceptBlockTest().main()
true
true
1c356f9f653fa9074e5403e150307fe4402f031b
2,463
py
Python
home/wasa/.config/ranger/colorschemes/black.py
wasamasa/dotfiles
bbbdfdef90ad96258fd91aa046d9c9a406ecd8ee
[ "Unlicense" ]
11
2016-01-01T16:36:54.000Z
2019-10-05T08:38:28.000Z
home/wasa/.config/ranger/colorschemes/black.py
wasamasa/dotfiles
bbbdfdef90ad96258fd91aa046d9c9a406ecd8ee
[ "Unlicense" ]
null
null
null
home/wasa/.config/ranger/colorschemes/black.py
wasamasa/dotfiles
bbbdfdef90ad96258fd91aa046d9c9a406ecd8ee
[ "Unlicense" ]
3
2017-07-27T15:32:00.000Z
2019-03-25T01:38:28.000Z
# Copyright (C) 2009, 2010, 2011 Roman Zimbelmann <romanz@lavabit.com> # This software is distributed under the terms of the GNU GPL version 3. from ranger.gui.colorscheme import ColorScheme from ranger.gui.color import * class Default(ColorScheme): progress_bar_color = black def use(self, context): fg, bg, attr = default_colors if context.reset: return default_colors elif context.in_browser: if context.selected: attr = reverse else: attr = normal if context.empty or context.error: bg = red if context.border: fg = default if context.media: if context.image: fg = yellow else: fg = magenta if context.container: fg = red if context.directory: attr |= bold fg = blue elif context.executable and not \ any((context.media, context.container, context.fifo, context.socket)): attr |= bold fg = green if context.socket: fg = magenta attr |= bold if context.fifo or context.device: fg = yellow if context.device: attr |= bold if context.link: fg = context.good and cyan or magenta if context.tag_marker and not context.selected: attr |= bold if fg in (red, magenta): fg = white else: fg = red if not context.selected and (context.cut or context.copied): fg = black attr |= bold if context.main_column: if context.selected: attr |= bold if context.marked: attr |= bold fg = yellow if context.badinfo: if attr & reverse: bg = magenta else: fg = magenta elif context.in_titlebar: attr |= bold if context.hostname: fg = context.bad and red or green elif context.directory: fg = blue elif context.tab: if context.good: bg = green elif context.link: fg = cyan elif context.in_statusbar: if context.permissions: if context.good: fg = cyan elif context.bad: fg = magenta if context.marked: attr |= bold | reverse fg = yellow if context.message: if context.bad: attr |= bold fg = red if context.loaded: bg = self.progress_bar_color if context.text: if context.highlight: attr |= reverse if context.in_taskview: if context.title: fg = blue if context.selected: attr |= reverse if context.loaded: if 
context.selected: fg = self.progress_bar_color else: bg = self.progress_bar_color return fg, bg, attr
21.232759
72
0.642306
from ranger.gui.colorscheme import ColorScheme from ranger.gui.color import * class Default(ColorScheme): progress_bar_color = black def use(self, context): fg, bg, attr = default_colors if context.reset: return default_colors elif context.in_browser: if context.selected: attr = reverse else: attr = normal if context.empty or context.error: bg = red if context.border: fg = default if context.media: if context.image: fg = yellow else: fg = magenta if context.container: fg = red if context.directory: attr |= bold fg = blue elif context.executable and not \ any((context.media, context.container, context.fifo, context.socket)): attr |= bold fg = green if context.socket: fg = magenta attr |= bold if context.fifo or context.device: fg = yellow if context.device: attr |= bold if context.link: fg = context.good and cyan or magenta if context.tag_marker and not context.selected: attr |= bold if fg in (red, magenta): fg = white else: fg = red if not context.selected and (context.cut or context.copied): fg = black attr |= bold if context.main_column: if context.selected: attr |= bold if context.marked: attr |= bold fg = yellow if context.badinfo: if attr & reverse: bg = magenta else: fg = magenta elif context.in_titlebar: attr |= bold if context.hostname: fg = context.bad and red or green elif context.directory: fg = blue elif context.tab: if context.good: bg = green elif context.link: fg = cyan elif context.in_statusbar: if context.permissions: if context.good: fg = cyan elif context.bad: fg = magenta if context.marked: attr |= bold | reverse fg = yellow if context.message: if context.bad: attr |= bold fg = red if context.loaded: bg = self.progress_bar_color if context.text: if context.highlight: attr |= reverse if context.in_taskview: if context.title: fg = blue if context.selected: attr |= reverse if context.loaded: if context.selected: fg = self.progress_bar_color else: bg = self.progress_bar_color return fg, bg, attr
true
true
1c35710b2cef0d92c7de221b7f909da16e1b497c
2,889
py
Python
server.py
kurodenjiro/bert-extractive-summarizer-vietnamese
f316ca4eaf142dc306086d434d5bba31e1718131
[ "MIT" ]
27
2019-12-06T06:17:12.000Z
2022-02-08T02:57:22.000Z
server.py
nguyenhothanhtam/bert-extractive-summarizer-vietnamese
f316ca4eaf142dc306086d434d5bba31e1718131
[ "MIT" ]
1
2020-01-28T05:16:46.000Z
2020-03-12T09:05:47.000Z
server.py
nguyenhothanhtam/bert-extractive-summarizer-vietnamese
f316ca4eaf142dc306086d434d5bba31e1718131
[ "MIT" ]
11
2019-12-06T06:48:11.000Z
2021-07-12T02:45:40.000Z
from flask import Flask from flask import request, jsonify, abort, make_response from flask_cors import CORS import nltk nltk.download('punkt') from nltk import tokenize from typing import List import argparse from summarizer import Summarizer app = Flask(__name__) CORS(app) class Parser(object): def __init__(self, raw_text: bytes): self.all_data = str(raw_text, 'utf-8').split('\n') def __isint(self, v) -> bool: try: int(v) return True except: return False def __should_skip(self, v) -> bool: return self.__isint(v) or v == '\n' or '-->' in v def __process_sentences(self, v) -> List[str]: sentence = tokenize.sent_tokenize(v) return sentence def save_data(self, save_path, sentences) -> None: with open(save_path, 'w') as f: for sentence in sentences: f.write("%s\n" % sentence) def run(self) -> List[str]: total: str = '' for data in self.all_data: if not self.__should_skip(data): cleaned = data.replace('&gt;', '').replace('\n', '').strip() if cleaned: total += ' ' + cleaned sentences = self.__process_sentences(total) return sentences def convert_to_paragraphs(self) -> str: sentences: List[str] = self.run() return ' '.join([sentence.strip() for sentence in sentences]).strip() @app.route('/summarize', methods=['POST']) def convert_raw_text(): ratio = float(request.args.get('ratio', 0.2)) min_length = int(request.args.get('min_length', 25)) max_length = int(request.args.get('max_length', 500)) data = request.data if not data: abort(make_response(jsonify(message="Request must have raw text"), 400)) parsed = Parser(data).convert_to_paragraphs() summary = summarizer(parsed, ratio=ratio, min_length=min_length, max_length=max_length) return jsonify({ 'summary': summary }) if __name__ == '__main__': parser = argparse.ArgumentParser(description='') parser.add_argument('-model', dest='model', default='bert-base-multilingual-uncased', help='The model to use') parser.add_argument('-greediness', dest='greediness', help='', default=0.45) parser.add_argument('-reduce', dest='reduce', 
help='', default='mean') parser.add_argument('-hidden', dest='hidden', help='', default=-2) parser.add_argument('-port', dest='port', help='', default=5000) parser.add_argument('-host', dest='host', help='', default='0.0.0.0') args = parser.parse_args() print(f"Using Model: {args.model}") summarizer = Summarizer( model=args.model, hidden=int(args.hidden), reduce_option=args.reduce, greedyness=float(args.greediness) ) app.run(host=args.host, port=int(args.port) ,debug=True)
31.064516
114
0.628245
from flask import Flask from flask import request, jsonify, abort, make_response from flask_cors import CORS import nltk nltk.download('punkt') from nltk import tokenize from typing import List import argparse from summarizer import Summarizer app = Flask(__name__) CORS(app) class Parser(object): def __init__(self, raw_text: bytes): self.all_data = str(raw_text, 'utf-8').split('\n') def __isint(self, v) -> bool: try: int(v) return True except: return False def __should_skip(self, v) -> bool: return self.__isint(v) or v == '\n' or '-->' in v def __process_sentences(self, v) -> List[str]: sentence = tokenize.sent_tokenize(v) return sentence def save_data(self, save_path, sentences) -> None: with open(save_path, 'w') as f: for sentence in sentences: f.write("%s\n" % sentence) def run(self) -> List[str]: total: str = '' for data in self.all_data: if not self.__should_skip(data): cleaned = data.replace('&gt;', '').replace('\n', '').strip() if cleaned: total += ' ' + cleaned sentences = self.__process_sentences(total) return sentences def convert_to_paragraphs(self) -> str: sentences: List[str] = self.run() return ' '.join([sentence.strip() for sentence in sentences]).strip() @app.route('/summarize', methods=['POST']) def convert_raw_text(): ratio = float(request.args.get('ratio', 0.2)) min_length = int(request.args.get('min_length', 25)) max_length = int(request.args.get('max_length', 500)) data = request.data if not data: abort(make_response(jsonify(message="Request must have raw text"), 400)) parsed = Parser(data).convert_to_paragraphs() summary = summarizer(parsed, ratio=ratio, min_length=min_length, max_length=max_length) return jsonify({ 'summary': summary }) if __name__ == '__main__': parser = argparse.ArgumentParser(description='') parser.add_argument('-model', dest='model', default='bert-base-multilingual-uncased', help='The model to use') parser.add_argument('-greediness', dest='greediness', help='', default=0.45) parser.add_argument('-reduce', dest='reduce', 
help='', default='mean') parser.add_argument('-hidden', dest='hidden', help='', default=-2) parser.add_argument('-port', dest='port', help='', default=5000) parser.add_argument('-host', dest='host', help='', default='0.0.0.0') args = parser.parse_args() print(f"Using Model: {args.model}") summarizer = Summarizer( model=args.model, hidden=int(args.hidden), reduce_option=args.reduce, greedyness=float(args.greediness) ) app.run(host=args.host, port=int(args.port) ,debug=True)
true
true
1c35736839de6098a178f6638a6100d0d931d311
463
py
Python
data/scripts/templates/object/draft_schematic/space/armor/shared_mass_reduction_kit_mk4.py
obi-two/GameServer
7d37024e2291a97d49522610cd8f1dbe5666afc2
[ "MIT" ]
20
2015-02-23T15:11:56.000Z
2022-03-18T20:56:48.000Z
data/scripts/templates/object/draft_schematic/space/armor/shared_mass_reduction_kit_mk4.py
apathyboy/swganh
665128efe9154611dec4cb5efc61d246dd095984
[ "MIT" ]
null
null
null
data/scripts/templates/object/draft_schematic/space/armor/shared_mass_reduction_kit_mk4.py
apathyboy/swganh
665128efe9154611dec4cb5efc61d246dd095984
[ "MIT" ]
20
2015-04-04T16:35:59.000Z
2022-03-24T14:54:37.000Z
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Intangible() result.template = "object/draft_schematic/space/armor/shared_mass_reduction_kit_mk4.iff" result.attribute_template_id = -1 result.stfName("string_id_table","") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
27.235294
89
0.734341
true
true
1c357403ff98f63e9f914ae91abc215018fb5b99
10,620
py
Python
lib/log/reader.py
kustodian/aerospike-admin
931ee55ccd65ba3e20e6611a0294c92b09e8cfcb
[ "Apache-2.0" ]
null
null
null
lib/log/reader.py
kustodian/aerospike-admin
931ee55ccd65ba3e20e6611a0294c92b09e8cfcb
[ "Apache-2.0" ]
null
null
null
lib/log/reader.py
kustodian/aerospike-admin
931ee55ccd65ba3e20e6611a0294c92b09e8cfcb
[ "Apache-2.0" ]
null
null
null
# Copyright 2013-2018 Aerospike, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import re import time import logging from lib.utils.util import shell_command from lib.utils.constants import DT_FMT DT_TO_MINUTE_FMT = "%b %d %Y %H:%M" DT_TIME_FMT = "%H:%M:%S" DATE_SEG = 0 DATE_SEPARATOR = "-" YEAR = 0 MONTH = 1 DATE = 2 TIME_SEG = 1 TIME_SEPARATOR = ":" HH = 0 MM = 1 SS = 2 INDEX_DT_LEN = 4 STEP = 1000 SERVER_ID_FETCH_READ_SIZE = 10000 FILE_READ_ENDS = ["tail", "head"] class LogReader(object): server_log_ext = "/aerospike.log" server_log_file_identifier = [ "thr_info.c::", "heartbeat_received", "Cluster_size"] server_log_file_identifier_pattern = "(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{2} \d{4} \d{2}:\d{2}:\d{2} GMT([-+]\d+){0,1}: (?:INFO|WARNING|DEBUG|DETAIL) \([a-z_:]+\): \([A-Za-z_\.\[\]]+:{1,2}-?[\d]+\)" logger = logging.getLogger('asadm') def get_server_node_id(self, file, fetch_end="tail", read_block_size=SERVER_ID_FETCH_READ_SIZE): if not fetch_end or fetch_end not in FILE_READ_ENDS: fetch_end = "tail" if not read_block_size: read_block_size = SERVER_ID_FETCH_READ_SIZE not_found = "" # pattern for logs of old server (< 3.9) is "node id " # pattern for logs of new server (>= 3.9) is "NODE-ID " server_log_node_identifiers = ["node id ", "NODE-ID "] server_node_id_pattern = "%s([0-9a-fA-F]+(\s|$))" block_to_check = 100 if not file: return not_found try: out, err = shell_command( ['%s -n %d "%s"' % (fetch_end, read_block_size, file)]) except 
Exception: return not_found if err or not out: return not_found lines = out.strip().split('\n') try: if lines: fetched_line_count = len(lines) end_index = fetched_line_count start_index = end_index - \ (block_to_check if block_to_check < end_index else end_index) while start_index >= 0 and start_index < end_index: one_string = " ".join(lines[start_index:end_index]) if any(id in one_string for id in server_log_node_identifiers): for line in reversed(lines[start_index:end_index]): for id in server_log_node_identifiers: if id in line: try: node_id = re.search( server_node_id_pattern % (id), line.strip()).group(1) if node_id: return node_id except Exception: pass end_index = start_index start_index = end_index - \ (block_to_check if block_to_check < end_index else end_index) except Exception: pass if fetch_end == "tail": return self.get_server_node_id(file=file, fetch_end="head", read_block_size=read_block_size) return not_found def is_server_log_file(self, file=""): if not file: return False try: out, err = shell_command(['head -n 10 "%s"' % (file)]) except Exception: return False if err or not out: return False lines = out.strip().split('\n') matched_count = 0 for line in lines: try: if re.search(self.server_log_file_identifier_pattern, line): matched_count += 1 except Exception: pass if matched_count > (len(lines)/2): return True return False def get_grep_string(self, strs, file, is_and=False, is_casesensitive=True): search_str = "" if not strs: return search_str if not isinstance(strs, list): return search_str grep_cmd = "grep " if not is_casesensitive: grep_cmd += "-i " g_str = strs[0] if is_and: search_str = "%s \"%s\" \"%s\"" % (grep_cmd, g_str, file) for str in strs[1:len(strs)]: search_str += "|" + "%s \"%s\"" % (grep_cmd, str) else: for str in strs[1:len(strs)]: g_str += "\\|" + str search_str = "%s \"%s\" \"%s\"" % (grep_cmd, g_str, file) return search_str def parse_timedelta(self, arg): toks = arg.split(":") num_toks = len(toks) if num_toks > 3: return 0 
toks.reverse() try: arg_seconds = long(toks[0].strip()) if num_toks > 1: arg_seconds = arg_seconds + (60 * long(toks[1].strip())) if num_toks > 2: arg_seconds = arg_seconds + (3600 * long(toks[2].strip())) except Exception: return 0 return datetime.timedelta(seconds=arg_seconds) def parse_init_dt(self, arg_from, tail_dt): if arg_from.startswith("-"): # Relative start time: try: init_dt = tail_dt - self.parse_timedelta(arg_from.strip("- ")) except Exception: self.logger.warning( "Ignoring relative start time. Can't parse relative start time " + arg_from) return 0 else: # Absolute start time: try: init_dt = datetime.datetime( *(time.strptime(arg_from, DT_FMT)[0:6])) except Exception as e: self.logger.warning( "Ignoring absolute start time. Can't parse absolute start time " + arg_from + " " + str(e)) return 0 return init_dt def _get_dt(self, line): return line[0: line.find(" GMT")] def parse_dt(self, line, dt_len=6): prefix = line[0: line.find(" GMT")].split(",")[0] return datetime.datetime(*(time.strptime(prefix, DT_FMT)[0:dt_len])) def _seek_to(self, f, c): if f and c: if f.tell() <= 0: f.seek(0, 0) else: tmp = f.read(1) while tmp != c: if f.tell() <= 1: f.seek(0, 0) break f.seek(-2, 1) tmp = f.read(1) def set_next_line(self, file_stream, jump=STEP, whence=1): file_stream.seek(int(jump), whence) self._seek_to(file_stream, "\n") def read_next_line(self, file_stream, jump=STEP, whence=1): file_stream.seek(int(jump), whence) self._seek_to(file_stream, "\n") ln = self.read_line(file_stream) return ln def _get_next_timestamp(self, f, min, max, last): self.set_next_line(f, max, 0) max = f.tell() self.set_next_line(f, min, 0) min = f.tell() if min >= max: f.seek(max) tm = self.parse_dt(self.read_line(f), dt_len=INDEX_DT_LEN) if tm > last: return max, tm else: return None, None if min == max: f.seek(min) tm = self.parse_dt(self.read_line(f), dt_len=INDEX_DT_LEN) if tm > last: return min, tm else: return None, None jump = (max - min) / 2 f.seek(int(jump) + min, 0) 
self._seek_to(f, '\n') last_read = f.tell() ln = self.read_line(f) tm = self.parse_dt(ln, dt_len=INDEX_DT_LEN) if tm <= last: return self._get_next_timestamp(f, f.tell(), max, last) else: return self._get_next_timestamp(f, min, last_read, last) def generate_server_log_indices(self, file_path): indices = {} try: f = open(file_path, 'r') start_timestamp = self.parse_dt(self.read_line(f), dt_len=INDEX_DT_LEN) indices[start_timestamp.strftime(DT_FMT)] = 0 min_seek_pos = 0 f.seek(0, 2) self.set_next_line(f, 0) last_pos = f.tell() f.seek(0, 0) last_timestamp = start_timestamp while True: if last_pos < (min_seek_pos + STEP): ln = self.read_next_line(f, last_pos, 0) else: ln = self.read_next_line(f) current_jump = 1000 while(self.parse_dt(ln, dt_len=INDEX_DT_LEN) <= last_timestamp): min_seek_pos = f.tell() if last_pos < (min_seek_pos + current_jump): ln = self.read_next_line(f, last_pos, 0) break else: ln = self.read_next_line(f, current_jump) current_jump *= 2 if self.parse_dt(ln, dt_len=INDEX_DT_LEN) <= last_timestamp: break max_seek_pos = f.tell() pos, tm = self._get_next_timestamp( f, min_seek_pos, max_seek_pos, last_timestamp) if not tm and not pos: break indices[tm.strftime(DT_FMT)] = pos f.seek(pos) min_seek_pos = pos last_timestamp = tm except Exception: pass return indices def read_line(self, f): if not f: return None ln = None while True: try: # checking for valid line with timestamp ln = f.readline() tm = self.parse_dt(ln) break except Exception: pass return ln
34.480519
221
0.512712
import datetime import re import time import logging from lib.utils.util import shell_command from lib.utils.constants import DT_FMT DT_TO_MINUTE_FMT = "%b %d %Y %H:%M" DT_TIME_FMT = "%H:%M:%S" DATE_SEG = 0 DATE_SEPARATOR = "-" YEAR = 0 MONTH = 1 DATE = 2 TIME_SEG = 1 TIME_SEPARATOR = ":" HH = 0 MM = 1 SS = 2 INDEX_DT_LEN = 4 STEP = 1000 SERVER_ID_FETCH_READ_SIZE = 10000 FILE_READ_ENDS = ["tail", "head"] class LogReader(object): server_log_ext = "/aerospike.log" server_log_file_identifier = [ "thr_info.c::", "heartbeat_received", "Cluster_size"] server_log_file_identifier_pattern = "(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{2} \d{4} \d{2}:\d{2}:\d{2} GMT([-+]\d+){0,1}: (?:INFO|WARNING|DEBUG|DETAIL) \([a-z_:]+\): \([A-Za-z_\.\[\]]+:{1,2}-?[\d]+\)" logger = logging.getLogger('asadm') def get_server_node_id(self, file, fetch_end="tail", read_block_size=SERVER_ID_FETCH_READ_SIZE): if not fetch_end or fetch_end not in FILE_READ_ENDS: fetch_end = "tail" if not read_block_size: read_block_size = SERVER_ID_FETCH_READ_SIZE not_found = "" server_log_node_identifiers = ["node id ", "NODE-ID "] server_node_id_pattern = "%s([0-9a-fA-F]+(\s|$))" block_to_check = 100 if not file: return not_found try: out, err = shell_command( ['%s -n %d "%s"' % (fetch_end, read_block_size, file)]) except Exception: return not_found if err or not out: return not_found lines = out.strip().split('\n') try: if lines: fetched_line_count = len(lines) end_index = fetched_line_count start_index = end_index - \ (block_to_check if block_to_check < end_index else end_index) while start_index >= 0 and start_index < end_index: one_string = " ".join(lines[start_index:end_index]) if any(id in one_string for id in server_log_node_identifiers): for line in reversed(lines[start_index:end_index]): for id in server_log_node_identifiers: if id in line: try: node_id = re.search( server_node_id_pattern % (id), line.strip()).group(1) if node_id: return node_id except Exception: pass end_index = start_index 
start_index = end_index - \ (block_to_check if block_to_check < end_index else end_index) except Exception: pass if fetch_end == "tail": return self.get_server_node_id(file=file, fetch_end="head", read_block_size=read_block_size) return not_found def is_server_log_file(self, file=""): if not file: return False try: out, err = shell_command(['head -n 10 "%s"' % (file)]) except Exception: return False if err or not out: return False lines = out.strip().split('\n') matched_count = 0 for line in lines: try: if re.search(self.server_log_file_identifier_pattern, line): matched_count += 1 except Exception: pass if matched_count > (len(lines)/2): return True return False def get_grep_string(self, strs, file, is_and=False, is_casesensitive=True): search_str = "" if not strs: return search_str if not isinstance(strs, list): return search_str grep_cmd = "grep " if not is_casesensitive: grep_cmd += "-i " g_str = strs[0] if is_and: search_str = "%s \"%s\" \"%s\"" % (grep_cmd, g_str, file) for str in strs[1:len(strs)]: search_str += "|" + "%s \"%s\"" % (grep_cmd, str) else: for str in strs[1:len(strs)]: g_str += "\\|" + str search_str = "%s \"%s\" \"%s\"" % (grep_cmd, g_str, file) return search_str def parse_timedelta(self, arg): toks = arg.split(":") num_toks = len(toks) if num_toks > 3: return 0 toks.reverse() try: arg_seconds = long(toks[0].strip()) if num_toks > 1: arg_seconds = arg_seconds + (60 * long(toks[1].strip())) if num_toks > 2: arg_seconds = arg_seconds + (3600 * long(toks[2].strip())) except Exception: return 0 return datetime.timedelta(seconds=arg_seconds) def parse_init_dt(self, arg_from, tail_dt): if arg_from.startswith("-"): try: init_dt = tail_dt - self.parse_timedelta(arg_from.strip("- ")) except Exception: self.logger.warning( "Ignoring relative start time. 
Can't parse relative start time " + arg_from) return 0 else: # Absolute start time: try: init_dt = datetime.datetime( *(time.strptime(arg_from, DT_FMT)[0:6])) except Exception as e: self.logger.warning( "Ignoring absolute start time. Can't parse absolute start time " + arg_from + " " + str(e)) return 0 return init_dt def _get_dt(self, line): return line[0: line.find(" GMT")] def parse_dt(self, line, dt_len=6): prefix = line[0: line.find(" GMT")].split(",")[0] return datetime.datetime(*(time.strptime(prefix, DT_FMT)[0:dt_len])) def _seek_to(self, f, c): if f and c: if f.tell() <= 0: f.seek(0, 0) else: tmp = f.read(1) while tmp != c: if f.tell() <= 1: f.seek(0, 0) break f.seek(-2, 1) tmp = f.read(1) def set_next_line(self, file_stream, jump=STEP, whence=1): file_stream.seek(int(jump), whence) self._seek_to(file_stream, "\n") def read_next_line(self, file_stream, jump=STEP, whence=1): file_stream.seek(int(jump), whence) self._seek_to(file_stream, "\n") ln = self.read_line(file_stream) return ln def _get_next_timestamp(self, f, min, max, last): self.set_next_line(f, max, 0) max = f.tell() self.set_next_line(f, min, 0) min = f.tell() if min >= max: f.seek(max) tm = self.parse_dt(self.read_line(f), dt_len=INDEX_DT_LEN) if tm > last: return max, tm else: return None, None if min == max: f.seek(min) tm = self.parse_dt(self.read_line(f), dt_len=INDEX_DT_LEN) if tm > last: return min, tm else: return None, None jump = (max - min) / 2 f.seek(int(jump) + min, 0) self._seek_to(f, '\n') last_read = f.tell() ln = self.read_line(f) tm = self.parse_dt(ln, dt_len=INDEX_DT_LEN) if tm <= last: return self._get_next_timestamp(f, f.tell(), max, last) else: return self._get_next_timestamp(f, min, last_read, last) def generate_server_log_indices(self, file_path): indices = {} try: f = open(file_path, 'r') start_timestamp = self.parse_dt(self.read_line(f), dt_len=INDEX_DT_LEN) indices[start_timestamp.strftime(DT_FMT)] = 0 min_seek_pos = 0 f.seek(0, 2) self.set_next_line(f, 0) last_pos = 
f.tell() f.seek(0, 0) last_timestamp = start_timestamp while True: if last_pos < (min_seek_pos + STEP): ln = self.read_next_line(f, last_pos, 0) else: ln = self.read_next_line(f) current_jump = 1000 while(self.parse_dt(ln, dt_len=INDEX_DT_LEN) <= last_timestamp): min_seek_pos = f.tell() if last_pos < (min_seek_pos + current_jump): ln = self.read_next_line(f, last_pos, 0) break else: ln = self.read_next_line(f, current_jump) current_jump *= 2 if self.parse_dt(ln, dt_len=INDEX_DT_LEN) <= last_timestamp: break max_seek_pos = f.tell() pos, tm = self._get_next_timestamp( f, min_seek_pos, max_seek_pos, last_timestamp) if not tm and not pos: break indices[tm.strftime(DT_FMT)] = pos f.seek(pos) min_seek_pos = pos last_timestamp = tm except Exception: pass return indices def read_line(self, f): if not f: return None ln = None while True: try: ln = f.readline() tm = self.parse_dt(ln) break except Exception: pass return ln
true
true
1c3576cd54023c4cac8155d1c7efed0c38748869
8,443
py
Python
win32/bin/Lib/lib2to3/fixes/fix_metaclass.py
nmercier/linux-cross-gcc
a5b0028fd2b72ec036a4725e93ba29d73cb753a6
[ "BSD-3-Clause" ]
3
2015-10-31T10:39:25.000Z
2019-04-27T20:19:33.000Z
win32/bin/Lib/lib2to3/fixes/fix_metaclass.py
nmercier/linux-cross-gcc
a5b0028fd2b72ec036a4725e93ba29d73cb753a6
[ "BSD-3-Clause" ]
2
2016-12-12T05:54:58.000Z
2016-12-12T05:55:44.000Z
win32/bin/Lib/lib2to3/fixes/fix_metaclass.py
nmercier/linux-cross-gcc
a5b0028fd2b72ec036a4725e93ba29d73cb753a6
[ "BSD-3-Clause" ]
null
null
null
"""Fixer for __metaclass__ = X -> (metaclass=X) methods. The various forms of classef (inherits nothing, inherits once, inherints many) don't parse the same in the CST so we look at ALL classes for a __metaclass__ and if we find one normalize the inherits to all be an arglist. For one-liner classes ('class X: pass') there is no indent/dedent so we normalize those into having a suite. Moving the __metaclass__ into the classdef can also cause the class body to be empty so there is some special casing for that as well. This fixer also tries very hard to keep original indenting and spacing in all those corner cases. """ # Author: Jack Diederich # Local imports from .. import fixer_base from ..pygram import token from ..fixer_util import Name, syms, Node, Leaf def has_metaclass(parent): """ we have to check the cls_node without changing it. There are two possiblities: 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta') 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta') """ for node in parent.children: if node.type == syms.suite: return has_metaclass(node) elif node.type == syms.simple_stmt and node.children: expr_node = node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: left_side = expr_node.children[0] if isinstance(left_side, Leaf) and \ left_side.value == '__metaclass__': return True return False def fixup_parse_tree(cls_node): """ one-line classes don't get a suite in the parse tree so we add one to normalize the tree """ for node in cls_node.children: if node.type == syms.suite: # already in the preferred format, do nothing return # !%@#! 
oneliners have no suite node, we have to fake one up for i, node in enumerate(cls_node.children): if node.type == token.COLON: break else: raise ValueError("No class suite and no ':'!") # move everything into a suite node suite = Node(syms.suite, []) while cls_node.children[i+1:]: move_node = cls_node.children[i+1] suite.append_child(move_node.clone()) move_node.remove() cls_node.append_child(suite) node = suite def fixup_simple_stmt(parent, i, stmt_node): """ if there is a semi-colon all the parts count as part of the same simple_stmt. We just want the __metaclass__ part so we move everything after the semi-colon into its own simple_stmt node """ for semi_ind, node in enumerate(stmt_node.children): if node.type == token.SEMI: # *sigh* break else: return node.remove() # kill the semicolon new_expr = Node(syms.expr_stmt, []) new_stmt = Node(syms.simple_stmt, [new_expr]) while stmt_node.children[semi_ind:]: move_node = stmt_node.children[semi_ind] new_expr.append_child(move_node.clone()) move_node.remove() parent.insert_child(i, new_stmt) new_leaf1 = new_stmt.children[0].children[0] old_leaf1 = stmt_node.children[0].children[0] new_leaf1.prefix = old_leaf1.prefix def remove_trailing_newline(node): if node.children and node.children[-1].type == token.NEWLINE: node.children[-1].remove() def find_metas(cls_node): # find the suite node (Mmm, sweet nodes) for node in cls_node.children: if node.type == syms.suite: break else: raise ValueError("No class suite!") # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ] for i, simple_node in list(enumerate(node.children)): if simple_node.type == syms.simple_stmt and simple_node.children: expr_node = simple_node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: # Check if the expr_node is a simple assignment. left_node = expr_node.children[0] if isinstance(left_node, Leaf) and \ left_node.value == u'__metaclass__': # We found an assignment to __metaclass__. 
fixup_simple_stmt(node, i, simple_node) remove_trailing_newline(simple_node) yield (node, i, simple_node) def fixup_indent(suite): """ If an INDENT is followed by a thing with a prefix then nuke the prefix Otherwise we get in trouble when removing __metaclass__ at suite start """ kids = suite.children[::-1] # find the first indent while kids: node = kids.pop() if node.type == token.INDENT: break # find the first Leaf while kids: node = kids.pop() if isinstance(node, Leaf) and node.type != token.DEDENT: if node.prefix: node.prefix = u'' return else: kids.extend(node.children[::-1]) class FixMetaclass(fixer_base.BaseFix): BM_compatible = True PATTERN = """ classdef<any*> """ def transform(self, node, results): if not has_metaclass(node): return fixup_parse_tree(node) # find metaclasses, keep the last one last_metaclass = None for suite, i, stmt in find_metas(node): last_metaclass = stmt stmt.remove() text_type = node.children[0].type # always Leaf(nnn, 'class') # figure out what kind of classdef we have if len(node.children) == 7: # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite]) # 0 1 2 3 4 5 6 if node.children[3].type == syms.arglist: arglist = node.children[3] # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite]) else: parent = node.children[3].clone() arglist = Node(syms.arglist, [parent]) node.set_child(3, arglist) elif len(node.children) == 6: # Node(classdef, ['class', 'name', '(', ')', ':', suite]) # 0 1 2 3 4 5 arglist = Node(syms.arglist, []) node.insert_child(3, arglist) elif len(node.children) == 4: # Node(classdef, ['class', 'name', ':', suite]) # 0 1 2 3 arglist = Node(syms.arglist, []) node.insert_child(2, Leaf(token.RPAR, u')')) node.insert_child(2, arglist) node.insert_child(2, Leaf(token.LPAR, u'(')) else: raise ValueError("Unexpected class definition") # now stick the metaclass in the arglist meta_txt = last_metaclass.children[0].children[0] meta_txt.value = 'metaclass' orig_meta_prefix = meta_txt.prefix if 
arglist.children: arglist.append_child(Leaf(token.COMMA, u',')) meta_txt.prefix = u' ' else: meta_txt.prefix = u'' # compact the expression "metaclass = Meta" -> "metaclass=Meta" expr_stmt = last_metaclass.children[0] assert expr_stmt.type == syms.expr_stmt expr_stmt.children[1].prefix = u'' expr_stmt.children[2].prefix = u'' arglist.append_child(last_metaclass) fixup_indent(suite) # check for empty suite if not suite.children: # one-liner that was just __metaclass_ suite.remove() pass_leaf = Leaf(text_type, u'pass') pass_leaf.prefix = orig_meta_prefix node.append_child(pass_leaf) node.append_child(Leaf(token.NEWLINE, u'\n')) elif len(suite.children) > 1 and \ (suite.children[-2].type == token.INDENT and suite.children[-1].type == token.DEDENT): # there was only one line in the class body and it was __metaclass__ pass_leaf = Leaf(text_type, u'pass') suite.insert_child(-1, pass_leaf) suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
36.868996
81
0.574559
from .. import fixer_base from ..pygram import token from ..fixer_util import Name, syms, Node, Leaf def has_metaclass(parent): for node in parent.children: if node.type == syms.suite: return has_metaclass(node) elif node.type == syms.simple_stmt and node.children: expr_node = node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: left_side = expr_node.children[0] if isinstance(left_side, Leaf) and \ left_side.value == '__metaclass__': return True return False def fixup_parse_tree(cls_node): for node in cls_node.children: if node.type == syms.suite: return if node.type == token.COLON: break else: raise ValueError("No class suite and no ':'!") suite = Node(syms.suite, []) while cls_node.children[i+1:]: move_node = cls_node.children[i+1] suite.append_child(move_node.clone()) move_node.remove() cls_node.append_child(suite) node = suite def fixup_simple_stmt(parent, i, stmt_node): for semi_ind, node in enumerate(stmt_node.children): if node.type == token.SEMI: break else: return node.remove() new_expr = Node(syms.expr_stmt, []) new_stmt = Node(syms.simple_stmt, [new_expr]) while stmt_node.children[semi_ind:]: move_node = stmt_node.children[semi_ind] new_expr.append_child(move_node.clone()) move_node.remove() parent.insert_child(i, new_stmt) new_leaf1 = new_stmt.children[0].children[0] old_leaf1 = stmt_node.children[0].children[0] new_leaf1.prefix = old_leaf1.prefix def remove_trailing_newline(node): if node.children and node.children[-1].type == token.NEWLINE: node.children[-1].remove() def find_metas(cls_node): for node in cls_node.children: if node.type == syms.suite: break else: raise ValueError("No class suite!") for i, simple_node in list(enumerate(node.children)): if simple_node.type == syms.simple_stmt and simple_node.children: expr_node = simple_node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: left_node = expr_node.children[0] if isinstance(left_node, Leaf) and \ left_node.value == u'__metaclass__': 
fixup_simple_stmt(node, i, simple_node) remove_trailing_newline(simple_node) yield (node, i, simple_node) def fixup_indent(suite): kids = suite.children[::-1] while kids: node = kids.pop() if node.type == token.INDENT: break while kids: node = kids.pop() if isinstance(node, Leaf) and node.type != token.DEDENT: if node.prefix: node.prefix = u'' return else: kids.extend(node.children[::-1]) class FixMetaclass(fixer_base.BaseFix): BM_compatible = True PATTERN = """ classdef<any*> """ def transform(self, node, results): if not has_metaclass(node): return fixup_parse_tree(node) last_metaclass = None for suite, i, stmt in find_metas(node): last_metaclass = stmt stmt.remove() text_type = node.children[0].type if len(node.children) == 7: if node.children[3].type == syms.arglist: arglist = node.children[3] else: parent = node.children[3].clone() arglist = Node(syms.arglist, [parent]) node.set_child(3, arglist) elif len(node.children) == 6: arglist = Node(syms.arglist, []) node.insert_child(3, arglist) elif len(node.children) == 4: arglist = Node(syms.arglist, []) node.insert_child(2, Leaf(token.RPAR, u')')) node.insert_child(2, arglist) node.insert_child(2, Leaf(token.LPAR, u'(')) else: raise ValueError("Unexpected class definition") meta_txt = last_metaclass.children[0].children[0] meta_txt.value = 'metaclass' orig_meta_prefix = meta_txt.prefix if arglist.children: arglist.append_child(Leaf(token.COMMA, u',')) meta_txt.prefix = u' ' else: meta_txt.prefix = u'' expr_stmt = last_metaclass.children[0] assert expr_stmt.type == syms.expr_stmt expr_stmt.children[1].prefix = u'' expr_stmt.children[2].prefix = u'' arglist.append_child(last_metaclass) fixup_indent(suite) if not suite.children: suite.remove() pass_leaf = Leaf(text_type, u'pass') pass_leaf.prefix = orig_meta_prefix node.append_child(pass_leaf) node.append_child(Leaf(token.NEWLINE, u'\n')) elif len(suite.children) > 1 and \ (suite.children[-2].type == token.INDENT and suite.children[-1].type == token.DEDENT): 
pass_leaf = Leaf(text_type, u'pass') suite.insert_child(-1, pass_leaf) suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
true
true
1c3576f304149d2feb21fca90c610701e782e463
7,309
py
Python
protoflow/utils/data.py
theblackfly/protoflow
02a77e59f6afc8d462a738874d06eca810911166
[ "MIT" ]
3
2020-10-07T05:04:05.000Z
2021-02-10T15:04:55.000Z
protoflow/utils/data.py
theblackfly/protoflow
02a77e59f6afc8d462a738874d06eca810911166
[ "MIT" ]
5
2020-04-09T13:36:15.000Z
2020-12-17T16:30:50.000Z
protoflow/utils/data.py
theblackfly/protoflow
02a77e59f6afc8d462a738874d06eca810911166
[ "MIT" ]
2
2020-10-01T21:48:16.000Z
2021-04-10T18:20:25.000Z
"""ProtoFlow data utilities.""" import hashlib import os import shutil import tarfile import zipfile import requests import six from six.moves.urllib.error import HTTPError, URLError from tensorflow.python.keras.utils.io_utils import path_to_string from tqdm import tqdm def _extract_archive(file_path, path=".", archive_format="auto"): """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. Arguments: file_path: path to the archive file path: path to extract the archive file archive_format: Archive format to try for extracting the file. Options are "auto", "tar", "zip", and None. "tar" includes tar, tar.gz, and tar.bz files. The default "auto" is ["tar", "zip"]. None or an empty list will return no matches found. Returns: True if a match was found and an archive extraction was completed, False otherwise. """ if archive_format is None: return False if archive_format == "auto": archive_format = ["tar", "zip"] if isinstance(archive_format, six.string_types): archive_format = [archive_format] file_path = path_to_string(file_path) path = path_to_string(path) for archive_type in archive_format: if archive_type == "tar": open_fn = tarfile.open is_match_fn = tarfile.is_tarfile if archive_type == "zip": open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile if is_match_fn(file_path): with open_fn(file_path) as archive: try: archive.extractall(path) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False def _hash_file(fpath, algorithm="sha256", chunk_size=65535): """Calculates a file sha256 or md5 hash. Example: ```python _hash_file("/path/to/file.zip") "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ``` Arguments: fpath: path to the file being validated algorithm: hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`. The default `"auto"` detects the hash algorithm in use. 
chunk_size: Bytes to read at a time, important for large files. Returns: The file hash """ if (algorithm == "sha256") or (algorithm == "auto" and len(hash) == 64): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, "rb") as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b""): hasher.update(chunk) return hasher.hexdigest() def _validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535): """Validates a file against a sha256 or md5 hash. Arguments: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of "auto", "sha256", or "md5". The default "auto" detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: Whether the file is valid """ if (algorithm == "sha256") or (algorithm == "auto" and len(file_hash) == 64): hasher = "sha256" else: hasher = "md5" if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash): return True else: return False def _get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith("download_warning"): return value return None def _save_response_content(response, destination, chunk_size=32768): with open(destination, "wb") as f: pbar = tqdm(total=None) progress = 0 for chunk in response.iter_content(chunk_size): if chunk: # filter out keep-alive new chunks f.write(chunk) progress += len(chunk) pbar.update(progress - pbar.n) pbar.close() def get_file_from_google(fname, file_id, untar=False, md5_hash=None, file_hash=None, cache_subdir="datasets", hash_algorithm="auto", extract=False, archive_format="auto", cache_dir=None): if cache_dir is None: cache_dir = os.path.join(os.path.expanduser("~"), ".keras") if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = "md5" datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = 
os.path.join("/tmp", ".keras") datadir = os.path.join(datadir_base, cache_subdir) os.makedirs(datadir, exist_ok=True) fname = path_to_string(fname) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + ".tar.gz" else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): # File found; verify integrity if a hash was provided. if file_hash is not None: if not _validate_file(fpath, file_hash, algorithm=hash_algorithm): print("A local file was found, but it seems to be " "incomplete or outdated because the " + hash_algorithm + " file hash does not match the original value of " + file_hash + " so we will re-download the data.") download = True else: download = True if download: print("Downloading data from Google Drive...") error_msg = "Failed on https://drive.google.com/file/d/{}: {} -- {}" try: try: url = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(url, params={"id": file_id}, stream=True) token = _get_confirm_token(response) if token: params = {"id": file_id, "confirm": token} response = session.get(url, params=params, stream=True) _save_response_content(response, fpath) except HTTPError as e: raise Exception(error_msg.format(file_id, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(file_id, e.errno, e.reason)) except (Exception, KeyboardInterrupt) as e: if os.path.exists(fpath): os.remove(fpath) raise e if untar: if not os.path.exists(untar_fpath): _extract_archive(fpath, datadir, archive_format="tar") return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath
33.072398
78
0.583801
import hashlib import os import shutil import tarfile import zipfile import requests import six from six.moves.urllib.error import HTTPError, URLError from tensorflow.python.keras.utils.io_utils import path_to_string from tqdm import tqdm def _extract_archive(file_path, path=".", archive_format="auto"): if archive_format is None: return False if archive_format == "auto": archive_format = ["tar", "zip"] if isinstance(archive_format, six.string_types): archive_format = [archive_format] file_path = path_to_string(file_path) path = path_to_string(path) for archive_type in archive_format: if archive_type == "tar": open_fn = tarfile.open is_match_fn = tarfile.is_tarfile if archive_type == "zip": open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile if is_match_fn(file_path): with open_fn(file_path) as archive: try: archive.extractall(path) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False def _hash_file(fpath, algorithm="sha256", chunk_size=65535): if (algorithm == "sha256") or (algorithm == "auto" and len(hash) == 64): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, "rb") as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b""): hasher.update(chunk) return hasher.hexdigest() def _validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535): if (algorithm == "sha256") or (algorithm == "auto" and len(file_hash) == 64): hasher = "sha256" else: hasher = "md5" if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash): return True else: return False def _get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith("download_warning"): return value return None def _save_response_content(response, destination, chunk_size=32768): with open(destination, "wb") as f: pbar = tqdm(total=None) progress = 0 for chunk in response.iter_content(chunk_size): if chunk: 
f.write(chunk) progress += len(chunk) pbar.update(progress - pbar.n) pbar.close() def get_file_from_google(fname, file_id, untar=False, md5_hash=None, file_hash=None, cache_subdir="datasets", hash_algorithm="auto", extract=False, archive_format="auto", cache_dir=None): if cache_dir is None: cache_dir = os.path.join(os.path.expanduser("~"), ".keras") if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = "md5" datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = os.path.join("/tmp", ".keras") datadir = os.path.join(datadir_base, cache_subdir) os.makedirs(datadir, exist_ok=True) fname = path_to_string(fname) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + ".tar.gz" else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): if file_hash is not None: if not _validate_file(fpath, file_hash, algorithm=hash_algorithm): print("A local file was found, but it seems to be " "incomplete or outdated because the " + hash_algorithm + " file hash does not match the original value of " + file_hash + " so we will re-download the data.") download = True else: download = True if download: print("Downloading data from Google Drive...") error_msg = "Failed on https://drive.google.com/file/d/{}: {} -- {}" try: try: url = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(url, params={"id": file_id}, stream=True) token = _get_confirm_token(response) if token: params = {"id": file_id, "confirm": token} response = session.get(url, params=params, stream=True) _save_response_content(response, fpath) except HTTPError as e: raise Exception(error_msg.format(file_id, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(file_id, e.errno, e.reason)) except (Exception, KeyboardInterrupt) as e: if os.path.exists(fpath): os.remove(fpath) raise e if untar: if not os.path.exists(untar_fpath): 
_extract_archive(fpath, datadir, archive_format="tar") return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath
true
true
1c35782b600897baf619499fe88cd4250bd51dd4
63
py
Python
test/test_interfaces.py
jiayiliu/gradio
6fa11437c09845322df3b47f732a924338d17862
[ "Apache-2.0" ]
5,481
2019-05-27T06:18:02.000Z
2022-03-31T20:33:43.000Z
test/test_interfaces.py
kanavanand/gradio
5eaa4b1e8b8971d1973b596b3d53aaa99739a07c
[ "Apache-2.0" ]
652
2019-06-18T20:16:03.000Z
2022-03-31T19:36:16.000Z
test/test_interfaces.py
kanavanand/gradio
5eaa4b1e8b8971d1973b596b3d53aaa99739a07c
[ "Apache-2.0" ]
366
2019-07-03T00:32:02.000Z
2022-03-31T11:32:01.000Z
import unittest if __name__ == '__main__': unittest.main()
15.75
26
0.698413
import unittest if __name__ == '__main__': unittest.main()
true
true
1c35792105a982e5498ceea5705f65e05efe0cc9
3,201
py
Python
samples/19.custom-dialogs/app.py
hangdong/botbuilder-python
8ff979a58fadc4356d76b9ce577f94da3245f664
[ "MIT" ]
null
null
null
samples/19.custom-dialogs/app.py
hangdong/botbuilder-python
8ff979a58fadc4356d76b9ce577f94da3245f664
[ "MIT" ]
null
null
null
samples/19.custom-dialogs/app.py
hangdong/botbuilder-python
8ff979a58fadc4356d76b9ce577f94da3245f664
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import asyncio import sys from datetime import datetime from flask import Flask, request, Response from botbuilder.core import ( BotFrameworkAdapter, BotFrameworkAdapterSettings, ConversationState, MemoryStorage, TurnContext, UserState, ) from botbuilder.schema import Activity, ActivityTypes from bots import DialogBot # Create the loop and Flask app from dialogs.root_dialog import RootDialog LOOP = asyncio.get_event_loop() APP = Flask(__name__, instance_relative_config=True) APP.config.from_object("config.DefaultConfig") # Create adapter. # See https://aka.ms/about-bot-adapter to learn more about how bots work. SETTINGS = BotFrameworkAdapterSettings(APP.config["APP_ID"], APP.config["APP_PASSWORD"]) ADAPTER = BotFrameworkAdapter(SETTINGS) # Catch-all for errors. async def on_error(context: TurnContext, error: Exception): # This check writes out errors to console log .vs. app insights. # NOTE: In production environment, you should consider logging this to Azure # application insights. print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr) # Send a message to the user await context.send_activity("The bot encountered an error or bug.") await context.send_activity( "To continue to run this bot, please fix the bot source code." 
) # Send a trace activity if we're talking to the Bot Framework Emulator if context.activity.channel_id == "emulator": # Create a trace activity that contains the error object trace_activity = Activity( label="TurnError", name="on_turn_error Trace", timestamp=datetime.utcnow(), type=ActivityTypes.trace, value=f"{error}", value_type="https://www.botframework.com/schemas/error", ) # Send a trace activity, which will be displayed in Bot Framework Emulator await context.send_activity(trace_activity) ADAPTER.on_turn_error = on_error # Create MemoryStorage and state MEMORY = MemoryStorage() USER_STATE = UserState(MEMORY) CONVERSATION_STATE = ConversationState(MEMORY) # Create Dialog and Bot DIALOG = RootDialog(USER_STATE) BOT = DialogBot(CONVERSATION_STATE, USER_STATE, DIALOG) # Listen for incoming requests on /api/messages. @APP.route("/api/messages", methods=["POST"]) def messages(): # Main bot message handler. if "application/json" in request.headers["Content-Type"]: body = request.json else: return Response(status=415) activity = Activity().deserialize(body) auth_header = ( request.headers["Authorization"] if "Authorization" in request.headers else "" ) try: task = LOOP.create_task( ADAPTER.process_activity(activity, auth_header, BOT.on_turn) ) LOOP.run_until_complete(task) return Response(status=201) except Exception as exception: raise exception if __name__ == "__main__": try: APP.run(debug=False, port=APP.config["PORT"]) # nosec debug except Exception as exception: raise exception
31.382353
88
0.707591
import asyncio import sys from datetime import datetime from flask import Flask, request, Response from botbuilder.core import ( BotFrameworkAdapter, BotFrameworkAdapterSettings, ConversationState, MemoryStorage, TurnContext, UserState, ) from botbuilder.schema import Activity, ActivityTypes from bots import DialogBot from dialogs.root_dialog import RootDialog LOOP = asyncio.get_event_loop() APP = Flask(__name__, instance_relative_config=True) APP.config.from_object("config.DefaultConfig") SETTINGS = BotFrameworkAdapterSettings(APP.config["APP_ID"], APP.config["APP_PASSWORD"]) ADAPTER = BotFrameworkAdapter(SETTINGS) async def on_error(context: TurnContext, error: Exception): print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr) await context.send_activity("The bot encountered an error or bug.") await context.send_activity( "To continue to run this bot, please fix the bot source code." ) if context.activity.channel_id == "emulator": # Create a trace activity that contains the error object trace_activity = Activity( label="TurnError", name="on_turn_error Trace", timestamp=datetime.utcnow(), type=ActivityTypes.trace, value=f"{error}", value_type="https://www.botframework.com/schemas/error", ) # Send a trace activity, which will be displayed in Bot Framework Emulator await context.send_activity(trace_activity) ADAPTER.on_turn_error = on_error # Create MemoryStorage and state MEMORY = MemoryStorage() USER_STATE = UserState(MEMORY) CONVERSATION_STATE = ConversationState(MEMORY) # Create Dialog and Bot DIALOG = RootDialog(USER_STATE) BOT = DialogBot(CONVERSATION_STATE, USER_STATE, DIALOG) # Listen for incoming requests on /api/messages. @APP.route("/api/messages", methods=["POST"]) def messages(): # Main bot message handler. 
if "application/json" in request.headers["Content-Type"]: body = request.json else: return Response(status=415) activity = Activity().deserialize(body) auth_header = ( request.headers["Authorization"] if "Authorization" in request.headers else "" ) try: task = LOOP.create_task( ADAPTER.process_activity(activity, auth_header, BOT.on_turn) ) LOOP.run_until_complete(task) return Response(status=201) except Exception as exception: raise exception if __name__ == "__main__": try: APP.run(debug=False, port=APP.config["PORT"]) # nosec debug except Exception as exception: raise exception
true
true
1c35796495a6efe9aa4ee9bb6b30b875bd60a38d
21
py
Python
examples/__init__.py
krvss/graph-talk
b420bc5b57c7dc25008428bf7c0ebfcbebd61b72
[ "Apache-2.0" ]
null
null
null
examples/__init__.py
krvss/graph-talk
b420bc5b57c7dc25008428bf7c0ebfcbebd61b72
[ "Apache-2.0" ]
null
null
null
examples/__init__.py
krvss/graph-talk
b420bc5b57c7dc25008428bf7c0ebfcbebd61b72
[ "Apache-2.0" ]
1
2018-01-11T09:23:46.000Z
2018-01-11T09:23:46.000Z
__author__ = 'krvss'
10.5
20
0.714286
__author__ = 'krvss'
true
true
1c3579c52b094c65f1192091953fefc03b1d7d27
14,473
py
Python
airflow/providers/elasticsearch/log/es_task_handler.py
markhatch/airflow
1d170f899bcc87110e55192517270ec89d511ca8
[ "Apache-2.0" ]
null
null
null
airflow/providers/elasticsearch/log/es_task_handler.py
markhatch/airflow
1d170f899bcc87110e55192517270ec89d511ca8
[ "Apache-2.0" ]
null
null
null
airflow/providers/elasticsearch/log/es_task_handler.py
markhatch/airflow
1d170f899bcc87110e55192517270ec89d511ca8
[ "Apache-2.0" ]
null
null
null
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import sys from collections import defaultdict from datetime import datetime from operator import attrgetter from time import time from typing import List, Optional, Tuple, Union from urllib.parse import quote # Using `from elasticsearch import *` would break elasticsearch mocking used in unit test. import elasticsearch import pendulum from elasticsearch_dsl import Search from airflow.configuration import conf from airflow.models import TaskInstance from airflow.utils import timezone from airflow.utils.log.file_task_handler import FileTaskHandler from airflow.utils.log.json_formatter import JSONFormatter from airflow.utils.log.logging_mixin import ExternalLoggingMixin, LoggingMixin # Elasticsearch hosted log type EsLogMsgType = List[Tuple[str, str]] class ElasticsearchTaskHandler(FileTaskHandler, ExternalLoggingMixin, LoggingMixin): """ ElasticsearchTaskHandler is a python log handler that reads logs from Elasticsearch. Note that Airflow does not handle the indexing of logs into Elasticsearch. Instead, Airflow flushes logs into local files. Additional software setup is required to index the logs into Elasticsearch, such as using Filebeat and Logstash. 
To efficiently query and sort Elasticsearch results, this handler assumes each log message has a field `log_id` consists of ti primary keys: `log_id = {dag_id}-{task_id}-{execution_date}-{try_number}` Log messages with specific log_id are sorted based on `offset`, which is a unique integer indicates log message's order. Timestamps here are unreliable because multiple log messages might have the same timestamp. """ PAGE = 0 MAX_LINE_PER_PAGE = 1000 LOG_NAME = 'Elasticsearch' def __init__( self, base_log_folder: str, filename_template: str, log_id_template: str, end_of_log_mark: str, write_stdout: bool, json_format: bool, json_fields: str, host_field: str = "host", offset_field: str = "offset", host: str = "localhost:9200", frontend: str = "localhost:5601", es_kwargs: Optional[dict] = conf.getsection("elasticsearch_configs"), ): """ :param base_log_folder: base folder to store logs locally :param log_id_template: log id template :param host: Elasticsearch host name """ es_kwargs = es_kwargs or {} super().__init__(base_log_folder, filename_template) self.closed = False self.client = elasticsearch.Elasticsearch([host], **es_kwargs) self.log_id_template = log_id_template self.frontend = frontend self.mark_end_on_close = True self.end_of_log_mark = end_of_log_mark self.write_stdout = write_stdout self.json_format = json_format self.json_fields = [label.strip() for label in json_fields.split(",")] self.host_field = host_field self.offset_field = offset_field self.context_set = False self.formatter: logging.Formatter self.handler: Union[logging.FileHandler, logging.StreamHandler] # type: ignore[assignment] def _render_log_id(self, ti: TaskInstance, try_number: int) -> str: dag_run = ti.dag_run if self.json_format: data_interval_start = self._clean_date(dag_run.data_interval_start) data_interval_end = self._clean_date(dag_run.data_interval_end) execution_date = self._clean_date(dag_run.execution_date) else: data_interval_start = dag_run.data_interval_start.isoformat() 
data_interval_end = dag_run.data_interval_end.isoformat() execution_date = dag_run.execution_date.isoformat() return self.log_id_template.format( dag_id=ti.dag_id, task_id=ti.task_id, run_id=ti.run_id, data_interval_start=data_interval_start, data_interval_end=data_interval_end, execution_date=execution_date, try_number=try_number, ) @staticmethod def _clean_date(value: datetime) -> str: """ Clean up a date value so that it is safe to query in elasticsearch by removing reserved characters. # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#_reserved_characters :param execution_date: execution date of the dag run. """ return value.strftime("%Y_%m_%dT%H_%M_%S_%f") def _group_logs_by_host(self, logs): grouped_logs = defaultdict(list) for log in logs: key = getattr(log, self.host_field, 'default_host') grouped_logs[key].append(log) # return items sorted by timestamp. result = sorted(grouped_logs.items(), key=lambda kv: getattr(kv[1][0], 'message', '_')) return result def _read_grouped_logs(self): return True def _read( self, ti: TaskInstance, try_number: int, metadata: Optional[dict] = None ) -> Tuple[EsLogMsgType, dict]: """ Endpoint for streaming log. :param ti: task instance object :param try_number: try_number of the task instance :param metadata: log metadata, can be used for steaming log reading and auto-tailing. :return: a list of tuple with host and log documents, metadata. """ if not metadata: metadata = {'offset': 0} if 'offset' not in metadata: metadata['offset'] = 0 offset = metadata['offset'] log_id = self._render_log_id(ti, try_number) logs = self.es_read(log_id, offset, metadata) logs_by_host = self._group_logs_by_host(logs) next_offset = offset if not logs else attrgetter(self.offset_field)(logs[-1]) # Ensure a string here. Large offset numbers will get JSON.parsed incorrectly # on the client. Sending as a string prevents this issue. 
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER metadata['offset'] = str(next_offset) # end_of_log_mark may contain characters like '\n' which is needed to # have the log uploaded but will not be stored in elasticsearch. loading_hosts = [ item[0] for item in logs_by_host if item[-1][-1].message != self.end_of_log_mark.strip() ] metadata['end_of_log'] = False if not logs else len(loading_hosts) == 0 cur_ts = pendulum.now() # Assume end of log after not receiving new log for 5 min, # as executor heartbeat is 1 min and there might be some # delay before Elasticsearch makes the log available. if 'last_log_timestamp' in metadata: last_log_ts = timezone.parse(metadata['last_log_timestamp']) if ( cur_ts.diff(last_log_ts).in_minutes() >= 5 or 'max_offset' in metadata and int(offset) >= int(metadata['max_offset']) ): metadata['end_of_log'] = True if int(offset) != int(next_offset) or 'last_log_timestamp' not in metadata: metadata['last_log_timestamp'] = str(cur_ts) # If we hit the end of the log, remove the actual end_of_log message # to prevent it from showing in the UI. def concat_logs(lines): log_range = (len(lines) - 1) if lines[-1].message == self.end_of_log_mark.strip() else len(lines) return '\n'.join(self._format_msg(lines[i]) for i in range(log_range)) message = [(host, concat_logs(hosted_log)) for host, hosted_log in logs_by_host] return message, metadata def _format_msg(self, log_line): """Format ES Record to match settings.LOG_FORMAT when used with json_format""" # Using formatter._style.format makes it future proof i.e. 
# if we change the formatter style from '%' to '{' or '$', this will still work if self.json_format: try: return self.formatter._style.format(_ESJsonLogFmt(self.json_fields, **log_line.to_dict())) except Exception: pass # Just a safe-guard to preserve backwards-compatibility return log_line.message def es_read(self, log_id: str, offset: str, metadata: dict) -> list: """ Returns the logs matching log_id in Elasticsearch and next offset. Returns '' if no log is found or there was an error. :param log_id: the log_id of the log to read. :param offset: the offset start to read log from. :param metadata: log metadata, used for steaming log download. """ # Offset is the unique key for sorting logs given log_id. search = Search(using=self.client).query('match_phrase', log_id=log_id).sort(self.offset_field) search = search.filter('range', **{self.offset_field: {'gt': int(offset)}}) max_log_line = search.count() if 'download_logs' in metadata and metadata['download_logs'] and 'max_offset' not in metadata: try: if max_log_line > 0: metadata['max_offset'] = attrgetter(self.offset_field)( search[max_log_line - 1].execute()[-1] ) else: metadata['max_offset'] = 0 except Exception: self.log.exception('Could not get current log size with log_id: %s', log_id) logs = [] if max_log_line != 0: try: logs = search[self.MAX_LINE_PER_PAGE * self.PAGE : self.MAX_LINE_PER_PAGE].execute() except Exception: self.log.exception('Could not read log with log_id: %s', log_id) return logs def emit(self, record): if self.handler: record.offset = int(time() * (10 ** 9)) self.handler.emit(record) def set_context(self, ti: TaskInstance) -> None: """ Provide task_instance context to airflow task handler. 
:param ti: task instance object """ self.mark_end_on_close = not ti.raw if self.json_format: self.formatter = JSONFormatter( fmt=self.formatter._fmt, json_fields=self.json_fields + [self.offset_field], extras={ 'dag_id': str(ti.dag_id), 'task_id': str(ti.task_id), 'execution_date': self._clean_date(ti.execution_date), 'try_number': str(ti.try_number), 'log_id': self._render_log_id(ti, ti.try_number), }, ) if self.write_stdout: if self.context_set: # We don't want to re-set up the handler if this logger has # already been initialized return self.handler = logging.StreamHandler(stream=sys.__stdout__) self.handler.setLevel(self.level) self.handler.setFormatter(self.formatter) else: super().set_context(ti) self.context_set = True def close(self) -> None: # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return if not self.mark_end_on_close: self.closed = True return # Case which context of the handler was not set. if self.handler is None: self.closed = True return # Reopen the file stream, because FileHandler.close() would be called # first in logging.shutdown() and the stream in it would be set to None. if self.handler.stream is None or self.handler.stream.closed: # type: ignore[attr-defined] self.handler.stream = self.handler._open() # type: ignore[union-attr] # Mark the end of file using end of log mark, # so we know where to stop while auto-tailing. self.handler.stream.write(self.end_of_log_mark) if self.write_stdout: self.handler.close() sys.stdout = sys.__stdout__ super().close() self.closed = True @property def log_name(self) -> str: """The log name""" return self.LOG_NAME def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str: """ Creates an address for an external log collecting service. 
:param task_instance: task instance object :type: task_instance: TaskInstance :param try_number: task instance try_number to read logs from. :return: URL to the external log collection service :rtype: str """ log_id = self._render_log_id(task_instance, try_number) scheme = '' if '://' in self.frontend else 'https://' return scheme + self.frontend.format(log_id=quote(log_id)) @property def supports_external_link(self) -> bool: """Whether we can support external links""" return bool(self.frontend) class _ESJsonLogFmt: """Helper class to read ES Logs and re-format it to match settings.LOG_FORMAT""" # A separate class is needed because 'self.formatter._style.format' uses '.__dict__' def __init__(self, json_fields: List, **kwargs): for field in json_fields: self.__setattr__(field, '') self.__dict__.update(kwargs)
39.116216
128
0.644994
import logging import sys from collections import defaultdict from datetime import datetime from operator import attrgetter from time import time from typing import List, Optional, Tuple, Union from urllib.parse import quote import elasticsearch import pendulum from elasticsearch_dsl import Search from airflow.configuration import conf from airflow.models import TaskInstance from airflow.utils import timezone from airflow.utils.log.file_task_handler import FileTaskHandler from airflow.utils.log.json_formatter import JSONFormatter from airflow.utils.log.logging_mixin import ExternalLoggingMixin, LoggingMixin EsLogMsgType = List[Tuple[str, str]] class ElasticsearchTaskHandler(FileTaskHandler, ExternalLoggingMixin, LoggingMixin): PAGE = 0 MAX_LINE_PER_PAGE = 1000 LOG_NAME = 'Elasticsearch' def __init__( self, base_log_folder: str, filename_template: str, log_id_template: str, end_of_log_mark: str, write_stdout: bool, json_format: bool, json_fields: str, host_field: str = "host", offset_field: str = "offset", host: str = "localhost:9200", frontend: str = "localhost:5601", es_kwargs: Optional[dict] = conf.getsection("elasticsearch_configs"), ): es_kwargs = es_kwargs or {} super().__init__(base_log_folder, filename_template) self.closed = False self.client = elasticsearch.Elasticsearch([host], **es_kwargs) self.log_id_template = log_id_template self.frontend = frontend self.mark_end_on_close = True self.end_of_log_mark = end_of_log_mark self.write_stdout = write_stdout self.json_format = json_format self.json_fields = [label.strip() for label in json_fields.split(",")] self.host_field = host_field self.offset_field = offset_field self.context_set = False self.formatter: logging.Formatter self.handler: Union[logging.FileHandler, logging.StreamHandler] def _render_log_id(self, ti: TaskInstance, try_number: int) -> str: dag_run = ti.dag_run if self.json_format: data_interval_start = self._clean_date(dag_run.data_interval_start) data_interval_end = 
self._clean_date(dag_run.data_interval_end) execution_date = self._clean_date(dag_run.execution_date) else: data_interval_start = dag_run.data_interval_start.isoformat() data_interval_end = dag_run.data_interval_end.isoformat() execution_date = dag_run.execution_date.isoformat() return self.log_id_template.format( dag_id=ti.dag_id, task_id=ti.task_id, run_id=ti.run_id, data_interval_start=data_interval_start, data_interval_end=data_interval_end, execution_date=execution_date, try_number=try_number, ) @staticmethod def _clean_date(value: datetime) -> str: return value.strftime("%Y_%m_%dT%H_%M_%S_%f") def _group_logs_by_host(self, logs): grouped_logs = defaultdict(list) for log in logs: key = getattr(log, self.host_field, 'default_host') grouped_logs[key].append(log) result = sorted(grouped_logs.items(), key=lambda kv: getattr(kv[1][0], 'message', '_')) return result def _read_grouped_logs(self): return True def _read( self, ti: TaskInstance, try_number: int, metadata: Optional[dict] = None ) -> Tuple[EsLogMsgType, dict]: if not metadata: metadata = {'offset': 0} if 'offset' not in metadata: metadata['offset'] = 0 offset = metadata['offset'] log_id = self._render_log_id(ti, try_number) logs = self.es_read(log_id, offset, metadata) logs_by_host = self._group_logs_by_host(logs) next_offset = offset if not logs else attrgetter(self.offset_field)(logs[-1]) metadata['offset'] = str(next_offset) loading_hosts = [ item[0] for item in logs_by_host if item[-1][-1].message != self.end_of_log_mark.strip() ] metadata['end_of_log'] = False if not logs else len(loading_hosts) == 0 cur_ts = pendulum.now() if 'last_log_timestamp' in metadata: last_log_ts = timezone.parse(metadata['last_log_timestamp']) if ( cur_ts.diff(last_log_ts).in_minutes() >= 5 or 'max_offset' in metadata and int(offset) >= int(metadata['max_offset']) ): metadata['end_of_log'] = True if int(offset) != int(next_offset) or 'last_log_timestamp' not in metadata: metadata['last_log_timestamp'] = str(cur_ts) def 
concat_logs(lines): log_range = (len(lines) - 1) if lines[-1].message == self.end_of_log_mark.strip() else len(lines) return '\n'.join(self._format_msg(lines[i]) for i in range(log_range)) message = [(host, concat_logs(hosted_log)) for host, hosted_log in logs_by_host] return message, metadata def _format_msg(self, log_line): if self.json_format: try: return self.formatter._style.format(_ESJsonLogFmt(self.json_fields, **log_line.to_dict())) except Exception: pass return log_line.message def es_read(self, log_id: str, offset: str, metadata: dict) -> list: search = Search(using=self.client).query('match_phrase', log_id=log_id).sort(self.offset_field) search = search.filter('range', **{self.offset_field: {'gt': int(offset)}}) max_log_line = search.count() if 'download_logs' in metadata and metadata['download_logs'] and 'max_offset' not in metadata: try: if max_log_line > 0: metadata['max_offset'] = attrgetter(self.offset_field)( search[max_log_line - 1].execute()[-1] ) else: metadata['max_offset'] = 0 except Exception: self.log.exception('Could not get current log size with log_id: %s', log_id) logs = [] if max_log_line != 0: try: logs = search[self.MAX_LINE_PER_PAGE * self.PAGE : self.MAX_LINE_PER_PAGE].execute() except Exception: self.log.exception('Could not read log with log_id: %s', log_id) return logs def emit(self, record): if self.handler: record.offset = int(time() * (10 ** 9)) self.handler.emit(record) def set_context(self, ti: TaskInstance) -> None: self.mark_end_on_close = not ti.raw if self.json_format: self.formatter = JSONFormatter( fmt=self.formatter._fmt, json_fields=self.json_fields + [self.offset_field], extras={ 'dag_id': str(ti.dag_id), 'task_id': str(ti.task_id), 'execution_date': self._clean_date(ti.execution_date), 'try_number': str(ti.try_number), 'log_id': self._render_log_id(ti, ti.try_number), }, ) if self.write_stdout: if self.context_set: # already been initialized return self.handler = logging.StreamHandler(stream=sys.__stdout__) 
self.handler.setLevel(self.level) self.handler.setFormatter(self.formatter) else: super().set_context(ti) self.context_set = True def close(self) -> None: # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return if not self.mark_end_on_close: self.closed = True return # Case which context of the handler was not set. if self.handler is None: self.closed = True return # Reopen the file stream, because FileHandler.close() would be called # first in logging.shutdown() and the stream in it would be set to None. if self.handler.stream is None or self.handler.stream.closed: # type: ignore[attr-defined] self.handler.stream = self.handler._open() # type: ignore[union-attr] # Mark the end of file using end of log mark, # so we know where to stop while auto-tailing. self.handler.stream.write(self.end_of_log_mark) if self.write_stdout: self.handler.close() sys.stdout = sys.__stdout__ super().close() self.closed = True @property def log_name(self) -> str: return self.LOG_NAME def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str: log_id = self._render_log_id(task_instance, try_number) scheme = '' if '://' in self.frontend else 'https://' return scheme + self.frontend.format(log_id=quote(log_id)) @property def supports_external_link(self) -> bool: return bool(self.frontend) class _ESJsonLogFmt: # A separate class is needed because 'self.formatter._style.format' uses '.__dict__' def __init__(self, json_fields: List, **kwargs): for field in json_fields: self.__setattr__(field, '') self.__dict__.update(kwargs)
true
true
1c357aaabd26ad1c9b7e06b63dbb6707448fb6a2
7,407
py
Python
src/plugins/EU_ZH/fetcher.py
johnharveymath/fetchers-python
c0049c2ea5731c850c5dbcf9602b51d7393a92f6
[ "Apache-2.0" ]
4
2020-04-03T16:20:08.000Z
2020-07-06T10:16:44.000Z
src/plugins/EU_ZH/fetcher.py
johnharveymath/fetchers-python
c0049c2ea5731c850c5dbcf9602b51d7393a92f6
[ "Apache-2.0" ]
15
2020-04-28T19:47:58.000Z
2020-10-20T12:18:59.000Z
src/plugins/EU_ZH/fetcher.py
yuewu57/fetchers-python
1f5fc7b64f43142991ad0d901b0840b3e8ef1382
[ "Apache-2.0" ]
26
2020-03-31T13:14:50.000Z
2021-06-23T21:02:54.000Z
# Copyright (C) 2020 University of Oxford # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pandas as pd import os import sys __all__ = ('EU_ZH_Fetcher',) from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher logger = logging.getLogger(__name__) """ site-location: https://github.com/covid19-eu-zh/covid19-eu-data COVID19 data for European countries created and maintained by covid19-eu-zh Data originally from Austria's Sozial Ministerium https://www.sozialministerium.at/Informationen-zum-Coronavirus/Neuartiges-Coronavirus-(2019-nCov).html Czech Ministry of Health https://onemocneni-aktualne.mzcr.cz/covid-19 Germany's Robert Koch Institute https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html Hungary's Office of the Prime Minister https://koronavirus.gov.hu/ Ireland's Health Protection Surveillance Centre https://www.hpsc.ie/a-z/respiratory/coronavirus/novelcoronavirus/casesinireland/ Poland - Government https://www.gov.pl/web/koronawirus/wykaz-zarazen-koronawirusem-sars-cov-2 Sweden's Public Health Authority https://www.folkhalsomyndigheten.se/smittskydd-beredskap/utbrott/aktuella-utbrott/covid-19/aktuellt-epidemiologiskt-lage/ Slovenia's Government Communications Office https://www.gov.si/en/topics/coronavirus-disease-covid-19/ Belgian institute for health: https://epistat.wiv-isp.be/Covid/ """ class EU_ZH_Fetcher(BaseEpidemiologyFetcher): LOAD_PLUGIN = True SOURCE = 'EU_ZH' def fetch(self, url): return pd.read_csv(url) # 
Certain regions have excess characters in some source files def clean_string(self, input): if isinstance(input, str): return input.replace('­', '') else: return input def parse_int(self, data): if pd.isna(data): return None if isinstance(data, str): data = data.replace('*', '') return int(data) def country_fetcher(self, region, country, code_3, code_2): logger.info("Processing number of cases in " + country) if code_3 == 'NOR': logger.warning("These GIDs not entirely accurate due to change in Norway's county boundaries, 2020.") if code_3 == 'BEL': logger.warning("These GIDs has MISSING region due to unknown data resourses, 2020.") url = 'https://github.com/covid19-eu-zh/covid19-eu-data/raw/master/dataset/covid-19-' + code_2 + '.csv' df = self.fetch(url) for index, record in df.iterrows(): # date Y-m-d or Y-m-dTH:M:S date = record['datetime'].split('T')[0] adm_area_2 = None # If no region is reported then all data is national if not hasattr(record, region): adm_area_1 = None gid = [code_3] # Ignore two known corrupted lines in the Polish data elif str(record[region])[:4] == 'http': continue elif pd.isna(record[region]) and code_3 == 'POL': continue # Austria's national data is reported with a blank region elif pd.isna(record[region]) and code_3 == 'AUT': adm_area_1 = None gid = [code_3] elif region == 'nuts_2' and code_3 == 'BEL': if self.clean_string(record['nuts_1']) == 'MISSING' or pd.isna(record[region]): continue success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr( input_adm_area_1=self.clean_string(record['nuts_1']), input_adm_area_2=self.clean_string(record[region]), return_original_if_failure=True, suppress_exception=True ) # If the region appears cleanly, then we can translate to obtain GID elif region == 'nuts_1' and code_3 == 'BEL': if pd.notna(record['nuts_2']): continue success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr( input_adm_area_1=self.clean_string(record[region]), return_original_if_failure=True, 
suppress_exception=True ) else: success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr( input_adm_area_1=self.clean_string(record[region]), return_original_if_failure=True, suppress_exception=True ) upsert_obj = { 'source': self.SOURCE, 'date': date, 'country': country, 'countrycode': code_3, 'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': None, 'gid': gid } # add the epidemiological properties to the object if they exist if hasattr(record, 'tests'): upsert_obj['tested'] = self.parse_int(record['tests']) if hasattr(record, 'cases'): upsert_obj['confirmed'] = self.parse_int(record['cases']) if hasattr(record, 'tests_positive'): upsert_obj['confirmed'] = self.parse_int(record['tests_positive']) if hasattr(record, 'recovered'): upsert_obj['recovered'] = self.parse_int(record['recovered']) if hasattr(record, 'deaths'): upsert_obj['dead'] = self.parse_int(record['deaths']) if hasattr(record, 'hospitalized'): upsert_obj['hospitalised'] = self.parse_int(record['hospitalized']) if hasattr(record, 'intensive_care'): upsert_obj['hospitalised_icu'] = self.parse_int(record['intensive_care']) if hasattr(record, 'quarantine'): upsert_obj['quarantined'] = self.parse_int(record['quarantine']) self.upsert_data(**upsert_obj) # read the list of countries from a csv file in order to fetch each one def load_countries_to_fetch(self): input_csv_fname = getattr(self.__class__, 'INPUT_CSV', "input.csv") path = os.path.dirname(sys.modules[self.__class__.__module__].__file__) csv_fname = os.path.join(path, input_csv_fname) if not os.path.exists(csv_fname): return None colnames = ['country', 'code_3', 'code_2', 'region'] input_pd = pd.read_csv(csv_fname) input_pd.columns = colnames input_pd = input_pd.where((pd.notnull(input_pd)), None) return input_pd def run(self): countries = self.load_countries_to_fetch() for index, record in countries.iterrows(): self.country_fetcher(record['region'], record['country'], record['code_3'], record['code_2'])
43.828402
158
0.622924
import logging import pandas as pd import os import sys __all__ = ('EU_ZH_Fetcher',) from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher logger = logging.getLogger(__name__) class EU_ZH_Fetcher(BaseEpidemiologyFetcher): LOAD_PLUGIN = True SOURCE = 'EU_ZH' def fetch(self, url): return pd.read_csv(url) def clean_string(self, input): if isinstance(input, str): return input.replace('­', '') else: return input def parse_int(self, data): if pd.isna(data): return None if isinstance(data, str): data = data.replace('*', '') return int(data) def country_fetcher(self, region, country, code_3, code_2): logger.info("Processing number of cases in " + country) if code_3 == 'NOR': logger.warning("These GIDs not entirely accurate due to change in Norway's county boundaries, 2020.") if code_3 == 'BEL': logger.warning("These GIDs has MISSING region due to unknown data resourses, 2020.") url = 'https://github.com/covid19-eu-zh/covid19-eu-data/raw/master/dataset/covid-19-' + code_2 + '.csv' df = self.fetch(url) for index, record in df.iterrows(): # date Y-m-d or Y-m-dTH:M:S date = record['datetime'].split('T')[0] adm_area_2 = None # If no region is reported then all data is national if not hasattr(record, region): adm_area_1 = None gid = [code_3] # Ignore two known corrupted lines in the Polish data elif str(record[region])[:4] == 'http': continue elif pd.isna(record[region]) and code_3 == 'POL': continue # Austria's national data is reported with a blank region elif pd.isna(record[region]) and code_3 == 'AUT': adm_area_1 = None gid = [code_3] elif region == 'nuts_2' and code_3 == 'BEL': if self.clean_string(record['nuts_1']) == 'MISSING' or pd.isna(record[region]): continue success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr( input_adm_area_1=self.clean_string(record['nuts_1']), input_adm_area_2=self.clean_string(record[region]), return_original_if_failure=True, suppress_exception=True ) elif region == 'nuts_1' and code_3 == 'BEL': if 
pd.notna(record['nuts_2']): continue success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr( input_adm_area_1=self.clean_string(record[region]), return_original_if_failure=True, suppress_exception=True ) else: success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr( input_adm_area_1=self.clean_string(record[region]), return_original_if_failure=True, suppress_exception=True ) upsert_obj = { 'source': self.SOURCE, 'date': date, 'country': country, 'countrycode': code_3, 'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': None, 'gid': gid } if hasattr(record, 'tests'): upsert_obj['tested'] = self.parse_int(record['tests']) if hasattr(record, 'cases'): upsert_obj['confirmed'] = self.parse_int(record['cases']) if hasattr(record, 'tests_positive'): upsert_obj['confirmed'] = self.parse_int(record['tests_positive']) if hasattr(record, 'recovered'): upsert_obj['recovered'] = self.parse_int(record['recovered']) if hasattr(record, 'deaths'): upsert_obj['dead'] = self.parse_int(record['deaths']) if hasattr(record, 'hospitalized'): upsert_obj['hospitalised'] = self.parse_int(record['hospitalized']) if hasattr(record, 'intensive_care'): upsert_obj['hospitalised_icu'] = self.parse_int(record['intensive_care']) if hasattr(record, 'quarantine'): upsert_obj['quarantined'] = self.parse_int(record['quarantine']) self.upsert_data(**upsert_obj) def load_countries_to_fetch(self): input_csv_fname = getattr(self.__class__, 'INPUT_CSV', "input.csv") path = os.path.dirname(sys.modules[self.__class__.__module__].__file__) csv_fname = os.path.join(path, input_csv_fname) if not os.path.exists(csv_fname): return None colnames = ['country', 'code_3', 'code_2', 'region'] input_pd = pd.read_csv(csv_fname) input_pd.columns = colnames input_pd = input_pd.where((pd.notnull(input_pd)), None) return input_pd def run(self): countries = self.load_countries_to_fetch() for index, record in countries.iterrows(): self.country_fetcher(record['region'], 
record['country'], record['code_3'], record['code_2'])
true
true
1c357c2992598df82ed165439dc034969fe00854
4,343
py
Python
messente_api/models/statistics_report_success.py
messente/messente-api-python
154abca9e6a226a5c97d8052c3f2631765503426
[ "Apache-2.0" ]
null
null
null
messente_api/models/statistics_report_success.py
messente/messente-api-python
154abca9e6a226a5c97d8052c3f2631765503426
[ "Apache-2.0" ]
null
null
null
messente_api/models/statistics_report_success.py
messente/messente-api-python
154abca9e6a226a5c97d8052c3f2631765503426
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Messente API [Messente](https://messente.com) is a global provider of messaging and user verification services. * Send and receive SMS, Viber, WhatsApp and Telegram messages. * Manage contacts and groups. * Fetch detailed info about phone numbers. * Blacklist phone numbers to make sure you're not sending any unwanted messages. Messente builds [tools](https://messente.com/documentation) to help organizations connect their services to people anywhere in the world. # noqa: E501 The version of the OpenAPI document: 1.4.0 Contact: messente@messente.com Generated by: https://openapi-generator.tech """ import inspect import pprint import re # noqa: F401 import six from messente_api.configuration import Configuration class StatisticsReportSuccess(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'reports': 'list[StatisticsReport]' } attribute_map = { 'reports': 'reports' } def __init__(self, reports=None, local_vars_configuration=None): # noqa: E501 """StatisticsReportSuccess - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._reports = None self.discriminator = None self.reports = reports @property def reports(self): """Gets the reports of this StatisticsReportSuccess. # noqa: E501 Array of report objects # noqa: E501 :return: The reports of this StatisticsReportSuccess. # noqa: E501 :rtype: list[StatisticsReport] """ return self._reports @reports.setter def reports(self, reports): """Sets the reports of this StatisticsReportSuccess. 
Array of report objects # noqa: E501 :param reports: The reports of this StatisticsReportSuccess. # noqa: E501 :type reports: list[StatisticsReport] """ if self.local_vars_configuration.client_side_validation and reports is None: # noqa: E501 raise ValueError("Invalid value for `reports`, must not be `None`") # noqa: E501 self._reports = reports def to_dict(self, serialize=False): """Returns the model properties as a dict""" result = {} def convert(x): if hasattr(x, "to_dict"): args = inspect.getargspec(x.to_dict).args if len(args) == 1: return x.to_dict() else: return x.to_dict(serialize) else: return x for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.attribute_map.get(attr, attr) if serialize else attr if isinstance(value, list): result[attr] = list(map( lambda x: convert(x), value )) elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], convert(item[1])), value.items() )) else: result[attr] = convert(value) return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, StatisticsReportSuccess): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, StatisticsReportSuccess): return True return self.to_dict() != other.to_dict()
32.654135
473
0.606954
import inspect import pprint import re import six from messente_api.configuration import Configuration class StatisticsReportSuccess(object): openapi_types = { 'reports': 'list[StatisticsReport]' } attribute_map = { 'reports': 'reports' } def __init__(self, reports=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._reports = None self.discriminator = None self.reports = reports @property def reports(self): return self._reports @reports.setter def reports(self, reports): if self.local_vars_configuration.client_side_validation and reports is None: raise ValueError("Invalid value for `reports`, must not be `None`") self._reports = reports def to_dict(self, serialize=False): result = {} def convert(x): if hasattr(x, "to_dict"): args = inspect.getargspec(x.to_dict).args if len(args) == 1: return x.to_dict() else: return x.to_dict(serialize) else: return x for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.attribute_map.get(attr, attr) if serialize else attr if isinstance(value, list): result[attr] = list(map( lambda x: convert(x), value )) elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], convert(item[1])), value.items() )) else: result[attr] = convert(value) return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, StatisticsReportSuccess): return False return self.to_dict() == other.to_dict() def __ne__(self, other): if not isinstance(other, StatisticsReportSuccess): return True return self.to_dict() != other.to_dict()
true
true
1c357c6fa74d5dc80c0005bc82d8ce47b5068ddd
612,712
py
Python
python/paddle/fluid/layers/nn.py
zmxdream/Paddle
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
[ "Apache-2.0" ]
2
2021-11-12T11:31:12.000Z
2021-12-05T10:30:28.000Z
python/paddle/fluid/layers/nn.py
zmxdream/Paddle
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
[ "Apache-2.0" ]
null
null
null
python/paddle/fluid/layers/nn.py
zmxdream/Paddle
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
[ "Apache-2.0" ]
1
2021-10-09T10:57:17.000Z
2021-10-09T10:57:17.000Z
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ All layers just related to the neural network. """ from __future__ import print_function import os import inspect import warnings import numpy as np import six import paddle from ..layer_helper import LayerHelper from ..initializer import Normal, Constant, NumpyArrayInitializer from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags from .. import dygraph_utils from ..param_attr import ParamAttr from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_ from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor from . import utils from .. import unique_name from functools import reduce from .. 
import core from ...utils import deprecated from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype import paddle from paddle.utils import deprecated from paddle import _C_ops __all__ = [ 'fc', 'embedding', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'chunk_eval', 'conv2d', 'conv3d', 'softmax', 'pool2d', 'pool3d', 'adaptive_pool2d', 'adaptive_pool3d', 'batch_norm', 'inplace_abn', 'instance_norm', 'data_norm', 'conv2d_transpose', 'conv3d_transpose', 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod', 'reduce_all', 'reduce_any', 'dropout', 'split', 'ctc_greedy_decoder', 'l2_normalize', 'matmul', 'topk', 'transpose', 'im2sequence', 'row_conv', 'multiplex', 'layer_norm', 'group_norm', 'spectral_norm', 'smooth_l1', 'one_hot', 'autoincreased_step_counter', 'reshape', 'squeeze', 'unsqueeze', 'lod_reset', 'lod_append', 'lrn', 'pad', 'pad_constant_like', 'label_smooth', 'roi_pool', 'roi_align', 'dice_loss', 'image_resize', 'image_resize_short', 'resize_linear', 'resize_bilinear', 'resize_trilinear', 'resize_nearest', 'gather', 'gather_nd', 'scatter', 'scatter_nd_add', 'scatter_nd', 'random_crop', 'mean_iou', 'relu', 'selu', 'log', 'crop', 'crop_tensor', 'elu', 'relu6', 'pow', 'stanh', 'hard_sigmoid', 'swish', 'prelu', 'brelu', 'leaky_relu', 'soft_relu', 'flatten', 'stack', 'pad2d', 'unstack', 'unique', 'unique_with_counts', 'expand', 'expand_as', 'scale', 'elementwise_add', 'elementwise_div', 'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min', 'elementwise_pow', 'elementwise_mod', 'elementwise_floordiv', 'uniform_random_batch_size_like', 'gaussian_random', 'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'strided_slice', 'shape', 'rank', 'size', 'logical_and', 'logical_or', 'logical_xor', 'logical_not', 'clip', 'clip_by_norm', 'mean', 'mul', 'maxout', 'space_to_depth', 'affine_grid', 'affine_channel', 'similarity_focus', 'hash', 'grid_sampler', 'log_loss', 
'add_position_encoding', 'bilinear_tensor_product', 'merge_selected_rows', 'get_tensor_from_selected_rows', 'shuffle_channel', 'temporal_shift', 'py_func', 'psroi_pool', 'prroi_pool', 'pixel_shuffle', 'fsp_matrix', 'continuous_value_model', 'where', 'sign', 'deformable_conv', 'unfold', 'deformable_roi_pooling', 'filter_by_instag', 'shard_index', 'hard_swish', 'mish', 'gather_tree', 'uniform_random', 'unbind', ] @dygraph_only def _elementwise_op_in_dygraph(x, y, axis=-1, act=None, use_mkldnn=False, op_name=None): op = getattr(_C_ops, op_name) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) return dygraph_utils._append_activation_in_dygraph( out, act, use_mkldnn=use_mkldnn) def fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None): r""" :api_attr: Static Graph **Fully Connected Layer** This operator creates a fully connected layer in the network. It can take a Tensor(or LoDTensor) or a list of Tensor(or LoDTensor) as its inputs(see Args in detail). It creates a variable called weight for each input Tensor, which represents a fully connected weight matrix from each input unit to each output unit. The fully connected layer multiplies each input Tensor with its corresponding weight to produce an output Tensor with shape :math:`[M, size]` , where M is batch size. If a list of Tensor is given, the results of multiple output Tensors with shape :math:`[M, size]` will be summed up. If :attr:`bias_attr` is not None, a bias variable will be created and added to the output. Finally, if :attr:`act` is not None, it will be applied to the output as well. When the input is a single Tensor(or LoDTensor): .. math:: Out = Act({XW + b}) When the input is a list of Tensor(or LoDTensor): .. math:: Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) In the above equation: * :math:`N`: Number of the input. N equals to len(input) if input is list of Variable. * :math:`X_i`: The i-th input tensor. 
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor. * :math:`b`: The bias parameter created by this layer (if needed). * :math:`Act`: The activation function. * :math:`Out`: The output Tensor. .. code-block:: text Case 1: Given a single Tensor data_1, and num_flatten_dims = 2: data_1.data = [[[0.1, 0.2], [0.3, 0.4]]] data_1.shape = (1, 2, 2) # 1 is batch_size out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2) Then output is: out.data = [[0.83234344], [0.34936576]] out.shape = (1, 2, 1) Case 2: Given a list of Tensor: data_1.data = [[[0.1, 0.2], [0.3, 0.4]]] data_1.shape = (1, 2, 2) # 1 is batch_size data_2 = [[[0.1, 0.2, 0.3]]] data_2.shape = (1, 1, 3) out = fluid.layers.fc(input=[data_1, data_2], size=2) Then: out.data = [[0.18669507, 0.1893476]] out.shape = (1, 2) Args: input (Variable|list of Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or a list of Tensor(or LoDTensor). The dimensions of the input Tensor is at least 2 and the data type should be float32 or float64. size(int): The number of output units in this layer, which also means the feature size of output Tensor(or LoDTensor). num_flatten_dims (int): The fc layer can accept an input Tensor with more than two dimensions. If this happens, the multidimensional tensor will first be flattened into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1) dimensions will be flatten to form the first dimension of the final matrix (height of the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to form the second dimension of the final matrix (width of the matrix). For example, assuming that X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3. Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1. 
param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . act (str): Activation to be applied to the output of this layer, such as tanh, softmax, sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Variable: Tensor or LoDTensor calculated by fc layer. The data type is same with input. Raises: ValueError: If dimensions of the input Tensor is less than 2. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() # when input is single tensor data = fluid.data(name="data", shape=[-1, 32], dtype="float32") fc = fluid.layers.fc(input=data, size=1000, act="tanh") # when input are multiple tensors data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32") data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32") fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh") """ helper = LayerHelper("fc", **locals()) check_type(input, 'input', (list, tuple, Variable), 'fc') if isinstance(input, (list, tuple)): for i, input_x in enumerate(input): check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc') dtype = helper.input_dtype() check_dtype(dtype, 'input', ['float16', 'uint16', 'float32', 'float64'], 'fc') mul_results = [] for input_var, param_attr in helper.iter_inputs_and_params(): input_shape = input_var.shape if num_flatten_dims == -1: num_flatten_dims = len(input_shape) - 1 param_shape = [ reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) ] + 
[size] w = helper.create_parameter( attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( type="mul", inputs={"X": input_var, "Y": w}, outputs={"Out": tmp}, attrs={"x_num_col_dims": num_flatten_dims, "y_num_col_dims": 1}) mul_results.append(tmp) if len(mul_results) == 1: pre_bias = mul_results[0] else: pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}, attrs={"use_mkldnn": False}) # add bias pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) # add activation return helper.append_activation(pre_activation) @deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding") def embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32'): r""" :api_attr: Static Graph **WARING:** This OP will be deprecated in a future release. This OP requires the last dimension of Tensor shape must be equal to 1. It is recommended to use fluid. :ref:`api_fluid_embedding` . The operator is used to lookup embeddings vector of ids provided by :attr:`input` . It automatically constructs a 2D embedding matrix based on the input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` . This OP requires the last dimension of Tensor shape must be equal to 1. The shape of output Tensor is generated by replacing the last dimension of the input Tensor shape with emb_size. **Note:** The id in :attr:`input` must satisfy :math:`0 =< id < size[0]` , otherwise the program will throw an exception and exit. .. code-block:: text Case 1: input is a Tensor. 
padding_idx = -1 input.data = [[[1], [3]], [[2], [4]], [[4], [127]]] input.shape = [3, 2, 1] Given size = [128, 16] output is a Tensor: out.shape = [3, 2, 16] out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], [0.345421456, 0.524563927, ..., 0.144534654]], [[0.345249859, 0.124939536, ..., 0.194353745], [0.945345345, 0.435394634, ..., 0.435345365]], [[0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]]] # padding data The input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127 It will pad all-zero data when ids is 127. Case 2: input is a LoDTensor with 1-level LoD. padding_idx = 0 input.lod = [[2, 3]] input.data = [[1], [3], [2], [4], [0]] input.shape = [5, 1] Given size = [128, 16] output is a LoDTensor: out.lod = [[2, 3]] out.shape = [5, 16] out.data = [[0.129435295, 0.244512452, ..., 0.436322452], [0.345421456, 0.524563927, ..., 0.144534654], [0.345249859, 0.124939536, ..., 0.194353745], [0.945345345, 0.435394634, ..., 0.435345365], [0.0, 0.0, ..., 0.0 ]] # padding data It will pad all-zero data when ids is 0. Args: input(Variable): A Tensor or LoDTensor with type int64, which contains the id information. The last dimension of Tensor shape must be equal to 1. The value of the input id should satisfy :math:`0<= id < size[0]` . size(tuple|list): The shape of lookup table parameter. It should have two elements which indicates the size of the dictionary of embeddings and the size of each embedding vector respectively. is_sparse(bool): The flag indicating whether to use sparse update. This parameter only affects the performance of the backwards gradient update. It is recommended to set True because sparse update is faster. 
But some optimizer does not support sparse update, such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` , :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` , :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` . In these case, is_sparse must be False. Default: False. is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used in multi-machine distributed CPU training. Default: False. padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size). If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup encounters :math:`padding\_idx` in id. And the padding data will not be updated while training. If set None, it makes no effect to output. Default: None. param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition, user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter. The local word vector needs to be transformed into numpy format, and the shape of local word vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer` is used to load custom or pre-trained word vectors. See code example 2 for details. dtype(str|core.VarDesc.VarType): It refers to the data type of output Tensor. It must be float32 or float64. Default: float32. Returns: Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` . Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle paddle.enable_static() data = fluid.data(name='x', shape=[None, 1], dtype='int64') # example 1 emb_1 = fluid.embedding(input=data, size=[128, 64]) # example 2: load custom or pre-trained word vectors weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format w_param_attrs = fluid.ParamAttr( name="emb_weight", learning_rate=0.5, initializer=fluid.initializer.NumpyArrayInitializer(weight_data), trainable=True) emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32') """ helper = LayerHelper('embedding', **locals()) check_variable_and_dtype(input, 'input', ['int64'], 'fluid.layers.embedding') check_dtype(dtype, 'dtype', ['uint16', 'float16', 'float32', 'float64'], 'fluid.layers.embedding') if is_distributed: is_distributed = False warnings.warn( "is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed" ) remote_prefetch = True if is_sparse else False w = helper.create_parameter( attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) tmp = helper.create_variable_for_type_inference(dtype) padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( size[0] + padding_idx) helper.append_op( type='lookup_table', inputs={'Ids': input, 'W': w}, outputs={'Out': tmp}, attrs={ 'is_sparse': is_sparse, 'is_distributed': is_distributed, 'remote_prefetch': remote_prefetch, 'padding_idx': padding_idx }) return tmp def _pull_sparse(input, size, table_id, accessor_class, name="embedding", ctr_label_name="", padding_id=0, dtype='float32', scale_sparse_grad=True): r""" **Pull Fleet Sparse Layer** This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in Fleet lookup table. The result of this lookup is the embedding of each ID in the :attr:`input`. Args: input(Variable|list of Variable): Input is a Tensor<int64> Variable, which contains the IDs information. 
size(int): The embedding size parameter, which indicates the size of each embedding vector respectively. table_id(int): the fleet table id of this embedding. accessor_class(str): the pslib accessor of the table, default is DownpourCtrAccessor. ctr_label_name(str): the layer name of click. padding_id(int): the padding id during lookup, default is 0. dtype(str): The dtype refers to the data type of output tensor. Only supports float32 now. scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default is True. Returns: Variable|list of Variable: The tensor variable storing the embeddings of the \ supplied inputs. Examples: .. code-block:: python import paddle.fluid as fluid data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) emb = fluid.layers.nn._pull_sparse( input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor") """ helper = LayerHelper(name, **locals()) inputs = helper.multiple_input() outs = [helper.create_variable_for_type_inference(dtype)] input_names = [i.name for i in inputs] attrs = { 'EmbeddingDim': size, 'TableId': table_id, 'AccessorClass': accessor_class, 'CtrLabelName': ctr_label_name, 'PaddingId': padding_id, 'ScaleSparseGrad': scale_sparse_grad, 'InputNames': input_names, # this is only for compatible with embedding op 'is_distributed': True } # this is only for compatible with embedding op w, _ = helper.create_or_get_global_variable( name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True) helper.append_op( type='pull_sparse', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs=attrs) if len(outs) == 1: return outs[0] return outs def _pull_sparse_v2(input, size, table_id, accessor_class, name="embedding", ctr_label_name="", padding_id=0, dtype='float32', scale_sparse_grad=True): r""" **Pull Fleet Sparse Layer** This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in Fleet lookup table. 
The result of this lookup is the embedding of each ID in the :attr:`input`. Args: input(Variable|list of Variable): Input is a Tensor<int64> Variable, which contains the IDs information. size(int): The embedding size parameter, which indicates the size of each embedding vector respectively. table_id(int): the pslib table id of this embedding. accessor_class(str): the fleet accessor of the table, default is DownpourCtrAccessor. ctr_label_name(str): the layer name of click. padding_id(int): the padding id during lookup, default is 0. dtype(str): The dtype refers to the data type of output tensor. Only supports float32 now. scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default is True. Returns: Variable|list of Variable: The tensor variable storing the embeddings of the \ supplied inputs. Examples: .. code-block:: python import paddle.fluid as fluid data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) emb = fluid.layers.nn._pull_sparse_v2( input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor") """ helper = LayerHelper(name, **locals()) inputs = helper.multiple_input() outs = [helper.create_variable_for_type_inference(dtype)] input_names = [i.name for i in inputs] attrs = { 'EmbeddingDim': size, 'TableId': table_id, 'AccessorClass': accessor_class, 'CtrLabelName': ctr_label_name, 'PaddingId': padding_id, 'ScaleSparseGrad': scale_sparse_grad, 'InputNames': input_names, # this is only for compatible with embedding op 'is_distributed': True } # this is only for compatible with embedding op w, _ = helper.create_or_get_global_variable( name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True) helper.append_op( type='pull_sparse_v2', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs=attrs) if len(outs) == 1: return outs[0] return outs def _pull_gpups_sparse(input, size, dtype='float32', is_distributed=False, is_sparse=False): r""" **Pull GpuPS Sparse Layer** This layer is used to 
lookup embeddings of IDs, provided by :attr:`input`, in GpuPS lookup table. The result of this lookup is the embedding of each ID in the :attr:`input`. Args: input(Variable|list of Variable): Input is a Tensor<int64> Variable, which contains the IDs information. size(int|list of int): The embedding size parameter of each input, which indicates the size of each embedding vector respectively. dtype(str): The dtype refers to the data type of output tensor. Only supports float32 now. Returns: Variable|list of Variable: The tensor variable storing the embeddings of the \ supplied inputs, whose size are indicated by size respectively. Examples: .. code-block:: python import paddle.fluid as fluid slots = [] data_1 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) slots.append(data_1) data_2 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) slots.append(data_2) embs = fluid.layers.pull_gpups_sparse(input=slots, size=[11, 35]) """ helper = LayerHelper('pull_gpups_sparse', **locals()) if dtype != 'float32': raise ValueError( "GpuPS only support float type embedding now, and your type is: " + dtype) helper.input_dtype() inputs = helper.multiple_input() outs = [ helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] w = helper.create_parameter( attr=helper.param_attr, shape=[11], dtype=dtype, is_bias=False) helper.append_op( type='pull_gpups_sparse', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs={ 'size': size, 'is_distributed': is_distributed, 'is_sparse': is_sparse }) if len(outs) == 1: return outs[0] return outs def _pull_box_sparse(input, size, dtype='float32', is_distributed=False, is_sparse=False): r""" **Pull Box Sparse Layer** This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in BoxPS lookup table. The result of this lookup is the embedding of each ID in the :attr:`input`. 
Args: input(Variable|list of Variable): Input is a Tensor<int64> Variable, which contains the IDs information. size(int): The embedding size parameter, which indicates the size of each embedding vector respectively. dtype(str): The dtype refers to the data type of output tensor. Only supports float32 now. Returns: Variable|list of Variable: The tensor variable storing the embeddings of the \ supplied inputs. Examples: .. code-block:: python import paddle.fluid as fluid data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) emb = fluid.layers.pull_box_sparse(input=data, size=[11]) """ helper = LayerHelper('pull_box_sparse', **locals()) if dtype != 'float32': raise ValueError( "BoxPS only support float type embedding now, and your type is: " + dtype) helper.input_dtype() inputs = helper.multiple_input() outs = [ helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] w = helper.create_parameter( attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False) helper.append_op( type='pull_box_sparse', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs={ 'size': size, 'is_distributed': is_distributed, 'is_sparse': is_sparse }) if len(outs) == 1: return outs[0] return outs @templatedoc() def linear_chain_crf(input, label, param_attr=None, length=None): """ :api_attr: Static Graph Linear Chain CRF. ${comment} Args: input(${emission_type}): ${emission_comment} label(${label_type}): ${label_comment} Length(${length_type}): ${length_comment} param_attr(ParamAttr): The attribute of the learnable parameter for transition parameter. Returns: output(${emission_exps_type}): ${emission_exps_comment} \n output(${transition_exps_type}): ${transition_exps_comment} \n output(${log_likelihood_type}): ${log_likelihood_comment} \n Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle paddle.enable_static() #define net structure, using LodTensor train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32') label = fluid.data(name='label', shape=[-1,1], dtype='int') emission= fluid.layers.fc(input=input_data, size=10, act="tanh") crf_cost = fluid.layers.linear_chain_crf( input=emission, label=label, param_attr=fluid.ParamAttr( name='crfw', learning_rate=0.01)) use_cuda = False place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_program) #define data, using LoDTensor a = fluid.create_lod_tensor(np.random.rand(12,10).astype('float32'), [[3,3,4,2]], place) b = fluid.create_lod_tensor(np.array([[1],[1],[2],[3],[1],[1],[1],[3],[1],[1],[1],[1]]),[[3,3,4,2]] , place) feed1 = {'input_data':a,'label':b} loss= exe.run(train_program,feed=feed1, fetch_list=[crf_cost]) print(loss) #define net structure, using padding train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): input_data2 = fluid.data(name='input_data2', shape=[-1,10,10], dtype='float32') label2 = fluid.data(name='label2', shape=[-1,10,1], dtype='int') label_length = fluid.data(name='length', shape=[-1,1], dtype='int') emission2= fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2) crf_cost2 = fluid.layers.linear_chain_crf( input=emission2, label=label2, length=label_length, param_attr=fluid.ParamAttr( name='crfw', learning_rate=0.01)) use_cuda = False place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup_program) #define data, using padding cc=np.random.rand(4,10,10).astype('float32') dd=np.random.rand(4,10,1).astype('int64') ll=np.array([[3],[3],[4],[2]]) feed2 = 
{'input_data2':cc,'label2':dd,'length':ll} loss2= exe.run(train_program,feed=feed2, fetch_list=[crf_cost2]) print(loss2) #[array([[ 7.8902354], # [ 7.3602567], # [ 10.004011], # [ 5.86721 ]], dtype=float32)] #you can use find_var to get transition parameter. transition=np.array(fluid.global_scope().find_var('crfw').get_tensor()) print(transition) """ check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'linear_chain_crf') check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf') helper = LayerHelper('linear_chain_crf', **locals()) size = input.shape[2] if length else input.shape[1] transition = helper.create_parameter( attr=helper.param_attr, shape=[size + 2, size], dtype=helper.input_dtype()) alpha = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) emission_exps = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) transition_exps = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) log_likelihood = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) this_inputs = { "Emission": [input], "Transition": transition, "Label": [label] } if length: this_inputs['Length'] = [length] helper.append_op( type='linear_chain_crf', inputs=this_inputs, outputs={ "Alpha": [alpha], "EmissionExps": [emission_exps], "TransitionExps": transition_exps, "LogLikelihood": log_likelihood }) return log_likelihood @templatedoc() def crf_decoding(input, param_attr, label=None, length=None): """ :api_attr: Static Graph ${comment} Args: input(Tensor): ${emission_comment} param_attr (ParamAttr|None): To specify the weight parameter attribute. Default: None, which means the default weight parameter property is used. See usage for details in :ref:`api_paddle_fluid_param_attr_ParamAttr` . label(${label_type}, optional): ${label_comment} length(${length_type}, optional): ${length_comment} Returns: Tensor: ${viterbi_path_comment} Examples: .. 
code-block:: python import paddle paddle.enable_static() # LoDTensor-based example num_labels = 10 feature = paddle.static.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1) label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1) emission = paddle.static.nn.fc(feature, size=num_labels) crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label, param_attr=paddle.ParamAttr(name="crfw")) crf_decode = paddle.static.nn.crf_decoding(input=emission, param_attr=paddle.ParamAttr(name="crfw")) # Common tensor example num_labels, max_len = 10, 20 feature = paddle.static.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32') label = paddle.static.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64') length = paddle.static.data(name='length', shape=[-1, 1], dtype='int64') emission = paddle.static.nn.fc(feature, size=num_labels, num_flatten_dims=2) crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label, length=length, param_attr=paddle.ParamAttr(name="crfw_pad")) crf_decode = paddle.static.nn.crf_decoding(input=emission, length=length, param_attr=paddle.ParamAttr(name="crfw_pad")) """ check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'crf_decoding') helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) viterbi_path = helper.create_variable_for_type_inference( dtype=core.VarDesc.VarType.INT64) inputs = {"Emission": [input], "Transition": transition, "Label": label} if length: inputs['Length'] = length helper.append_op( type='crf_decoding', inputs=inputs, outputs={"ViterbiPath": [viterbi_path]}) return viterbi_path @templatedoc() def cos_sim(X, Y): """ ${comment} Args: X (Tensor): ${x_comment}. Y (Tensor): ${y_comment}. Returns: A Tensor representing the output of cosine(X, Y). Examples: .. 
code-block:: python import paddle x = paddle.rand(shape=[3, 7], dtype='float32') y = paddle.rand(shape=[1, 7], dtype='float32') out = paddle.fluid.layers.cos_sim(x, y) print(out) """ check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim') check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim') helper = LayerHelper('cos_sim', **locals()) out = helper.create_variable_for_type_inference(dtype=X.dtype) xnorm = helper.create_variable_for_type_inference(dtype=X.dtype) ynorm = helper.create_variable_for_type_inference(dtype=X.dtype) helper.append_op( type='cos_sim', inputs={'X': [X], 'Y': [Y]}, outputs={'Out': [out], 'XNorm': [xnorm], 'YNorm': [ynorm]}) return out @deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout") def dropout(x, dropout_prob, is_test=None, seed=None, name=None, dropout_implementation="downgrade_in_infer"): """ Computes dropout. Drop or keep each element of `x` independently. Dropout is a regularization technique for reducing overfitting by preventing neuron co-adaption during training. The dropout operator randomly sets (according to the given dropout probability) the outputs of some units to zero, while others are remain unchanged. dropout op can be removed from the program to make the program more efficient. Args: x (Variable): The input tensor variable. The data type is float16 or float32 or float64. dropout_prob (float): Probability of setting units to zero. is_test (bool): A flag indicating whether it is in test phrase or not. Default None, in dynamic graph, it use global tracer mode; in static graph, it means False. seed (int): A Python integer used to create random seeds. If this parameter is set to None, a random seed is used. NOTE: If an integer seed is given, always the same output units will be dropped. DO NOT use a fixed seed in training.Default: None. name (str|None): A name for this layer(optional). If set None, the layer will be named automatically. 
dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train'] 1. downgrade_in_infer(default), downgrade the outcome at inference - train: out = input * mask - inference: out = input * (1.0 - dropout_prob) (mask is a tensor same shape with input, value is 0 or 1 ratio of 0 is dropout_prob) 2. upscale_in_train, upscale the outcome at training time - train: out = input * mask / ( 1.0 - dropout_prob ) - inference: out = input (mask is a tensor same shape with input, value is 0 or 1 ratio of 0 is dropout_prob) Returns: A Variable holding Tensor representing the dropout, has same shape and data type with `x`. Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32") dropped = fluid.layers.dropout(x, dropout_prob=0.5) """ # fast return for p == 0 if dropout_prob == 0: return x if in_dygraph_mode(): if (seed is None or seed == 0) and default_main_program().random_seed != 0: seed = default_main_program().random_seed if is_test is None: is_test = not _dygraph_tracer()._train_mode out, mask = _C_ops.dropout( x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed', seed is not None, 'seed', seed if seed is not None else 0, 'dropout_implementation', dropout_implementation) return out def get_attrs(prog, dropout_prob, is_test, seed): if (seed is None or seed == 0) and prog.random_seed != 0: seed = prog.random_seed attrs = { 'dropout_prob': dropout_prob, 'is_test': is_test, 'fix_seed': seed is not None, 'seed': seed if seed is not None else 0, 'dropout_implementation': dropout_implementation, } return attrs helper = LayerHelper('dropout', **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'dropout') out = helper.create_variable_for_type_inference(dtype=x.dtype) mask = helper.create_variable_for_type_inference( dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) attrs = get_attrs(helper.main_program, dropout_prob, 
@templatedoc()
def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               seq_length=None):
    r"""
    This operator computes the precision, recall and F1-score for chunk
    detection. It is often used in sequence tagging tasks, such as
    Named Entity Recognition (NER). For some basics of chunking, please
    refer to `Chunking with Support Vector Machines
    <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .

    This operator supports IOB, IOE, IOBES and IO (also known as plain)
    tagging schemes. Since the implementation uses label ids rather
    than label strings, the ids are mapped to tag and chunk types as:

    .. code-block:: python

        tag_type = label % num_tag_type
        chunk_type = label / num_tag_type

    where `num_tag_type` is the number of tag types in the tagging
    scheme, `num_chunk_type` is the number of chunk types, and
    `tag_type` is taken from the following table:

    .. code-block:: python

        Scheme Begin Inside End   Single
        plain   0     -      -     -
        IOB     0     1      -     -
        IOE     -     0      1     -
        IOBES   0     1      2     3

    For example, with the IOB scheme and chunk types ORG, PER and LOC,
    the label ids would be: B-ORG 0, I-ORG 1, B-PER 2, I-PER 3,
    B-LOC 4, I-LOC 5, O 6, which maps every label id to the correct
    tag type and chunk type.

    Args:
        input (Tensor): A Tensor representing the predicted labels from
            the network. Its shape would be `[N, M, 1]`, where `N`
            stands for batch size, `M` for sequence length. The data
            type should be int64.
        label (Tensor): A Tensor representing the ground-truth labels.
            It should have the same shape, lod and data type as
            ``input``.
        chunk_scheme (str): Indicate the tagging scheme used here. The
            value must be IOB, IOE, IOBES or plain.
        num_chunk_types (int): The number of chunk types.
        excluded_chunk_types (list, optional): Chunk types that should
            not be taken into account, given as a list of chunk type
            ids (integer). Default None.
        seq_length (Tensor, optional): A 1D Tensor containing the
            length of each sequence when ``input`` and ``label`` are
            Tensor. Default None.

    Returns:
        tuple: A tuple including precision, recall, F1-score, chunk number detected, \
            chunk number in ground-truth, chunk number correctly detected. Each \
            is a Tensor with shape `[1]`. The data type of precision, recall and \
            F1-score all is float32, and the others' data type all is int64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            dict_size = 10000
            label_dict_len = 7
            sequence = fluid.data(
                name='id', shape=[None, 1], lod_level=1, dtype='int64')
            embedding = fluid.embedding(
                input=sequence, size=[dict_size, 512])
            hidden = fluid.layers.fc(input=embedding, size=512)
            label = fluid.data(
                name='label', shape=[None, 1], lod_level=1, dtype='int64')
            crf = fluid.layers.linear_chain_crf(
                input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
            crf_decode = fluid.layers.crf_decoding(
                input=hidden, param_attr=fluid.ParamAttr(name="crfw"))
            fluid.layers.chunk_eval(
                input=crf_decode,
                label=label,
                chunk_scheme="IOB",
                num_chunk_types=int((label_dict_len - 1) / 2))
    """
    helper = LayerHelper("chunk_eval", **locals())

    check_variable_and_dtype(input, 'input', ['int64'], 'chunk_eval')
    check_variable_and_dtype(label, 'label', ['int64'], 'chunk_eval')

    # Metric outputs: precision/recall/F1 are float32, chunk counters int64.
    precision, recall, f1_score = (
        helper.create_variable_for_type_inference(dtype="float32")
        for _ in range(3))
    num_infer_chunks, num_label_chunks, num_correct_chunks = (
        helper.create_variable_for_type_inference(dtype="int64")
        for _ in range(3))

    op_inputs = {"Inference": [input], "Label": [label]}
    if seq_length is not None:
        # Only wire the optional sequence-length input when it is provided.
        op_inputs["SeqLength"] = [seq_length]

    helper.append_op(
        type="chunk_eval",
        inputs=op_inputs,
        outputs={
            "Precision": [precision],
            "Recall": [recall],
            "F1-Score": [f1_score],
            "NumInferChunks": [num_infer_chunks],
            "NumLabelChunks": [num_label_chunks],
            "NumCorrectChunks": [num_correct_chunks]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            "chunk_scheme": chunk_scheme,
            "excluded_chunk_types": excluded_chunk_types or []
        })
    return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
            num_correct_chunks)
@deprecated(since="2.0.0", update_to="paddle.nn.functional.softmax")
def softmax(input, use_cudnn=True, name=None, axis=-1):
    r"""
    This operator implements the softmax layer. The calculation process
    is as follows:

    1. The dimension :attr:`axis` of ``input`` is permuted to the last.

    2. The input tensor is then logically flattened to a 2-D matrix
       whose second dimension (row length) equals the size of dimension
       :attr:`axis` of the input, and whose first dimension (column
       length) is the product of all remaining dimensions. For each row
       of the matrix the softmax operator squashes the K-dimensional
       (K is the row width) vector of arbitrary real values to a
       K-dimensional vector of real values in the range [0, 1] that
       add up to 1.

    3. Afterwards the inverse operations of steps 1 and 2 restore the
       two-dimensional matrix to the original shape of ``input``.

    For each row :math:`i` and each column :math:`j` in the matrix:

    .. math::

        Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j(exp(X[i, j])}

    Args:
        input (Tensor): The input tensor. A multi-dimension ``Tensor``
            with type float32 or float64.
        use_cudnn (bool, optional): Use the cudnn kernel or not; only
            valid when the cudnn library is installed. True by default
            for performance.
        name (str, optional): The default value is None. Normally there
            is no need for user to set this property. For more
            information, please refer to :ref:`api_guide_Name`.
        axis (int, optional): The dimension along which to perform the
            softmax calculation. It should be in range
            :math:`[-1, rank - 1]` where :math:`rank` is the rank of
            the input tensor. Default: -1 (the last dimension).

    Returns:
        Tensor: the output of softmax, with the same data type and
        shape as ``input``.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
                                   [3.0, 4.0, 5.0, 6.0],
                                   [7.0, 8.0, 8.0, 9.0]],
                                  [[1.0, 2.0, 3.0, 4.0],
                                   [5.0, 6.0, 7.0, 8.0],
                                   [6.0, 7.0, 8.0, 9.0]]], dtype='float32')
            y = F.softmax(x, axis=1)
            print(y)
            # [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
            #   [0.01786798, 0.01786798, 0.04661262, 0.04661262],
            #   [0.97555870, 0.97555870, 0.93623954, 0.93623954]],
            #  [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
            #   [0.26762316, 0.26762316, 0.26762316, 0.26762316],
            #   [0.72747517, 0.72747517, 0.72747517, 0.72747517]]]
    """
    # Dygraph mode dispatches straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)

    # NOTE: both dicts are built before LayerHelper(**locals()) so they are
    # part of the locals the helper captures, as in the original layout.
    inputs = {"X": [input]}
    attrs = {"axis": axis, "use_cudnn": use_cudnn}

    helper = LayerHelper('softmax', **locals())
    check_variable_and_dtype(input, 'input/x',
                             ['float16', 'float32', 'float64'], 'softmax')

    dtype = helper.input_dtype()
    softmax_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="softmax",
        inputs={"X": input},
        outputs={"Out": softmax_out},
        attrs=attrs)

    return softmax_out
def conv2d(input,
           num_filters,
           filter_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=None,
           param_attr=None,
           bias_attr=None,
           use_cudnn=True,
           act=None,
           name=None,
           data_format="NCHW"):
    r"""
    :api_attr: Static Graph

    The convolution2D layer calculates the output based on the input,
    filter and strides, paddings, dilations, groups parameters. Input
    and Output are in NCHW or NHWC format, where N is batch size, C is
    the number of channels, H is the height of the feature, and W is
    the width of the feature. Filter is in MCHW format, where M is the
    number of output image channels, C is the number of input image
    channels, H is the height of the filter, and W is the width of the
    filter. If the groups is greater than 1, C will equal the number of
    input image channels divided by the groups. Please refer to UFLDL's
    `convolution
    <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
    for more details. If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    Where:

    * :math:`X`: Input value, a tensor with NCHW or NHWC format.
    * :math:`W`: Filter value, a tensor with MCHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

    Args:
        input (Tensor): The input is a 4-D Tensor with shape
            [N, C, H, W]; its data type is float16, float32 or float64.
        num_filters (int): The number of filters; equals the number of
            output image channels.
        filter_size (int|tuple): The filter size. A tuple must contain
            two integers (filter_size_height, filter_size_width); an
            int means both are equal to it.
        stride (int|tuple): The stride size. A tuple must contain two
            integers (stride_height, stride_width); an int means both
            are equal to it. Default: 1.
        padding (string|int|list|tuple): The padding size. A string
            must be 'SAME' or 'VALID' (padding algorithm). A tuple or
            list can have the forms `[pad_height, pad_width]`,
            `[pad_height_top, pad_height_bottom, pad_width_left,
            pad_width_right]`, or, per 2-element pairs,
            `[[0,0], [0,0], [pad_height_top, pad_height_bottom],
            [pad_width_left, pad_width_right]]` for `"NCHW"` and
            `[[0,0], [pad_height_top, pad_height_bottom],
            [pad_width_left, pad_width_right], [0,0]]` for `"NHWC"`.
            Default: 0.
        dilation (int|tuple): The dilation size (spacing between kernel
            points). A tuple must contain two integers
            (dilation_height, dilation_width); an int means both are
            equal to it. Default: 1.
        groups (int): The groups number of the Conv2d Layer. According
            to grouped convolution in Alex Krizhevsky's Deep CNN paper:
            when group=2, the first half of the filters is only
            connected to the first half of the input channels, while
            the second half of the filters is only connected to the
            second half of the input channels. Default: 1.
        param_attr (ParamAttr|None): Parameter attribute for the
            learnable weights. If None (or an attribute with no
            initializer), the weight is initialized with
            :math:`Normal(0.0, std)` where
            :math:`std = (\\frac{2.0}{filter\_elem\_num})^{0.5}`.
            Default: None.
        bias_attr (ParamAttr|bool|None): Parameter attribute for the
            bias. False disables the bias; None (or an attribute with
            no initializer) gives a zero-initialized bias.
            Default: None.
        use_cudnn (bool): Use the cudnn kernel or not; only valid when
            the cudnn library is installed. Default: True.
        act (str): Activation type; None means no activation is
            appended. Default: None.
        name (str|None): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format (str, optional): Data format of the input; the
            output uses the same format. One of `"NCHW"`, `"NHWC"`.
            Default: `"NCHW"`.

    Returns:
        A Tensor representing the conv2d result, with the same data
        type as ``input``. If ``act`` is None it holds the convolution
        result, otherwise the convolution followed by the activation.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ShapeError: If the input is not 4-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not be divided by groups.

    Examples:

        .. code-block:: python

            import paddle
            paddle.enable_static()

            data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
            conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
            print(conv2d.shape) # [-1, 2, 30, 30]
    """
    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'conv2d')
    if len(input.shape) != 4:
        raise ValueError("Input size should be 4, "
                         "but received {}".format(len(input.shape)))
    if not isinstance(use_cudnn, bool):
        raise ValueError("Attr(use_cudnn) should be True or False. Received "
                         "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    # NOTE: the channel axis depends on the layout; it must only be read
    # after data_format has been validated (a stale NCHW-only read here
    # would be wrong for NHWC input).
    channel_last = (data_format == "NHWC")
    num_channels = input.shape[3] if channel_last else input.shape[1]
    if num_channels < 0:
        raise ValueError(
            "The channel dimmention of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))
    assert param_attr is not False, "param_attr should not be False here."

    if groups is None:
        num_filter_channels = num_channels
    elif groups <= 0:
        raise ValueError("the groups of input must be greater than 0, "
                         "but received the groups of input is {}".format(
                             groups))
    else:
        if num_channels % groups != 0:
            raise ValueError(
                "the channel of input must be divisible by groups,"
                "received: the channel of input is {}, the shape of input is {}"
                ", the groups is {}".format(num_channels, input.shape, groups))
        num_filter_channels = num_channels // groups

    # Pick the depthwise kernel when each input channel maps to its own
    # group of filters; cudnn has no depthwise kernel, ROCm does.
    l_type = 'conv2d'
    if (num_channels == groups and num_filters % num_channels == 0 and
            not use_cudnn):
        l_type = 'depthwise_conv2d'

    if (num_channels == groups and num_filters % num_channels == 0 and
            core.is_compiled_with_rocm()):
        l_type = 'depthwise_conv2d'

    # NPU only supports depthwise_conv2d when
    # "input_channel = output_channel = groups"
    if core.is_compiled_with_npu():
        if (num_channels == groups and num_channels == num_filters):
            l_type = 'depthwise_conv2d'
        else:
            l_type = 'conv2d'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()

    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    # padding
    def _update_padding(padding, data_format):
        # Normalize the many accepted padding forms to a flat 2- or
        # 4-integer list, rejecting non-zero batch/channel padding.
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')
            if utils._is_symmetric_padding(padding, 2):
                padding = [padding[0], padding[2]]
        else:
            padding = utils.convert_to_list(padding, 2, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0]

    padding = _update_padding(padding, data_format)

    filter_shape = [num_filters, int(num_filter_channels)] + filter_size

    def _get_default_param_initializer():
        # Kaiming-style normal init scaled by the filter fan-in.
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        if filter_elem_num <= 0:
            raise ValueError(
                "Invalid filter number, excepted number is larger than 0, but"
                " received {}, please check the input shape and "
                "filter size.".format(filter_elem_num))
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    if (core.is_compiled_with_cuda() and paddle.fluid.get_flags(
            "FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]):
        use_cudnn = False

    helper.append_op(
        type=l_type,
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'use_mkldnn': False,
            'fuse_relu_before_depthwise_conv': False,
            "padding_algorithm": padding_algorithm,
            "data_format": data_format,
        })

    # The bias is broadcast along the channel axis, whose position
    # depends on the layout.
    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)

    return helper.append_activation(pre_act)
def conv3d(input,
           num_filters,
           filter_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=None,
           param_attr=None,
           bias_attr=None,
           use_cudnn=True,
           act=None,
           name=None,
           data_format="NCDHW"):
    r"""
    :api_attr: Static Graph

    The convolution3D layer calculates the output based on the input,
    filter and strides, paddings, dilations, groups parameters.
    Input(Input) and Output(Output) are in NCDHW or NDHWC format, where
    N is batch size, C is the number of channels, D is the depth of the
    feature, H is the height of the feature, and W is the width of the
    feature. Convlution3D is similar with Convlution2D but adds one
    dimension(depth). If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

            D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
            H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1

    Args:
        input (Tensor): The input is a 5-D Tensor with shape
            [N, C, D, H, W]; its data type is float16, float32 or
            float64.
        num_filters (int): The number of filters; equals the number of
            output image channels.
        filter_size (int|tuple): The filter size. A tuple must contain
            three integers (filter_size_depth, filter_size_height,
            filter_size_width); an int means all three are equal to it.
        stride (int|tuple): The stride size. A tuple must contain three
            integers (stride_depth, stride_height, stride_width); an
            int means all three are equal to it. Default: 1.
        padding (string|int|list|tuple): The padding size. A string
            must be 'SAME' or 'VALID' (padding algorithm). A tuple or
            list can have the forms
            `[pad_depth, pad_height, pad_width]`,
            `[pad_depth_front, pad_depth_back, pad_height_top,
            pad_height_bottom, pad_width_left, pad_width_right]`, or,
            per 2-element pairs, `[[0,0], [0,0],
            [pad_depth_front, pad_depth_back],
            [pad_height_top, pad_height_bottom],
            [pad_width_left, pad_width_right]]` for `"NCDHW"` and
            `[[0,0], [pad_depth_front, pad_depth_back],
            [pad_height_top, pad_height_bottom],
            [pad_width_left, pad_width_right], [0,0]]` for `"NDHWC"`.
            Default: 0.
        dilation (int|tuple): The dilation size (spacing between kernel
            points). A tuple must contain three integers
            (dilation_depth, dilation_height, dilation_width); an int
            means all three are equal to it. Default: 1.
        groups (int): The groups number of the Conv3d Layer. According
            to grouped convolution in Alex Krizhevsky's Deep CNN paper:
            when group=2, the first half of the filters is only
            connected to the first half of the input channels, while
            the second half of the filters is only connected to the
            second half of the input channels. Default: 1.
        param_attr (ParamAttr|None): Parameter attribute for the
            learnable weights. If None, the parameter is initialized
            with :math:`Normal(0.0, std)` where
            :math:`std = (\\frac{2.0}{filter\_elem\_num})^{0.5}`.
            Default: None.
        bias_attr (ParamAttr|bool|None): Parameter attribute for the
            bias. False disables the bias; None (or an attribute with
            no initializer) gives a zero-initialized bias.
            Default: None.
        use_cudnn (bool): Use the cudnn kernel or not; only valid when
            the cudnn library is installed. Default: True.
        act (str): Activation type; None means no activation is
            appended. Default: None.
        name (str|None): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format (str, optional): Data format of the input; the
            output uses the same format. One of `"NCDHW"`, `"NDHWC"`.
            Default: `"NCDHW"`.

    Returns:
        A Variable holding a Tensor representing the conv3d result,
        with the same data type as ``input``. If ``act`` is None it
        holds the convolution result, otherwise the convolution
        followed by the activation.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ShapeError: If the input is not 5-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not be divided by groups.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            paddle.enable_static()
            data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
            param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
            res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
            output = exe.run(feed={"data": x}, fetch_list=[res])
            print(output)
    """
    l_type = 'conv3d'
    assert param_attr is not False, "param_attr should not be False here."
    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()

    if not isinstance(use_cudnn, bool):
        raise ValueError("Attr(use_cudnn) should be True or False. Received "
                         "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    channel_last = (data_format == "NDHWC")
    if len(input.shape) != 5:
        raise ValueError(
            "Input should be 5D tensor, but received input with the shape of {}".
            format(input.shape))
    # The channel axis depends on the layout; a negative size means the
    # channel dimension was left undefined by the caller.
    num_channels = input.shape[4] if channel_last else input.shape[1]
    if num_channels < 0:
        raise ValueError(
            "The channel dimmention of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))

    if groups is None:
        num_filter_channels = num_channels
    elif groups <= 0:
        raise ValueError(
            "the groups of conv3d should be greater than 0. Received groups: {}".
            format(groups))
    else:
        if num_channels % groups != 0:
            raise ValueError(
                "The number of input channels must be divisible by Attr(groups). "
                "Received: number of channels(%s), groups(%s)." %
                (str(num_channels), str(groups)))
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
    stride = utils.convert_to_list(stride, 3, 'stride')
    dilation = utils.convert_to_list(dilation, 3, 'dilation')

    def _update_padding(padding, data_format):
        # Normalize the many accepted padding forms to a flat 3- or
        # 6-integer list, rejecting non-zero batch/channel padding.
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        else:
            padding = utils.convert_to_list(padding, 3, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0]

    padding = _update_padding(padding, data_format)

    filter_shape = [num_filters, num_filter_channels] + filter_size

    def _get_default_param_initializer():
        # Kaiming-style normal init scaled by the filter fan-in.
        filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
            2] * num_channels
        if filter_elem_num <= 0:
            raise ValueError(
                "Invalid filter number, excepted number is larger than 0, but"
                " received {}, please check the input shape and "
                "filter size.".format(filter_elem_num))
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=l_type,
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'use_mkldnn': False,
            "padding_algorithm": padding_algorithm,
            "data_format": data_format,
        })

    # The bias is broadcast along the channel axis, whose position
    # depends on the layout.
    if data_format == 'NCDHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)

    return helper.append_activation(pre_act)
dim_start=1, dim_end=2) else: pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5) return helper.append_activation(pre_act) @templatedoc() def pool2d(input, pool_size=-1, pool_type="max", pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCHW"): """ ${comment} Args: input (Variable): The input tensor of pooling operator which is a 4-D tensor with shape [N, C, H, W]. The format of input tensor is `"NCHW"` or `"NHWC"`, where `N` is batch size, `C` is the number of channels, `H` is the height of the feature, and `W` is the width of the feature. The data type if float32 or float64. pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two integers, (pool_size_Height, pool_size_Width). Otherwise, the pool kernel size will be a square of an int. pool_type: ${pooling_type_comment} pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list, it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise, the pool stride size will be a square of an int. pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or 'SAME' which is the padding algorithm. If pool padding size is a tuple or list, it could be in three forms: `[pad_height, pad_width]` or `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`, `pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`. when `data_format` is `"NHWC"`, `pool_padding` can be in the form `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`. Otherwise, the pool padding size will be a square of an int. 
global_pooling (bool): ${global_pooling_comment} use_cudnn (bool): ${use_cudnn_comment} ceil_mode (bool): ${ceil_mode_comment} name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. exclusive (bool): Whether to exclude padding points in average pooling mode, default is `true`. data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. Returns: Variable: The output tensor of pooling result. The data type is same as input tensor. Raises: ValueError: If `pool_type` is not "max" nor "avg". ValueError: If `global_pooling` is False and `pool_size` is -1. TypeError: If `use_cudnn` is not a bool value. ValueError: If `data_format` is not "NCHW" or "NHWC". ValueError: If `pool_padding` is a string, but not "SAME" or "VALID". ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True. ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero. ShapeError: If the input is not a 4-D or 5-D Tensor. ShapeError: If the dimension of input minus the size of `pool_stride` is not 2. ShapeError: If the size of `pool_size` and `pool_stride` is not equal. ShapeError: If the output's shape calculated is not greater than 0. Examples: .. 
code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') # max pool2d pool2d = fluid.layers.pool2d( input = data, pool_size = 2, pool_type = "max", pool_stride = 1, global_pooling=False) # average pool2d pool2d = fluid.layers.pool2d( input = data, pool_size = 2, pool_type = "avg", pool_stride = 1, global_pooling=False) # global average pool2d pool2d = fluid.layers.pool2d( input = data, pool_size = 2, pool_type = "avg", pool_stride = 1, global_pooling=True) # Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW". out_1 = fluid.layers.pool2d( input = data, pool_size = 3, pool_type = "avg", pool_stride = 1, pool_padding = [1, 2, 1, 0], data_format = "NCHW") # Attr(pool_padding) is a string, Attr(data_format) is "NCHW". out_2 = fluid.layers.pool2d( input = data, pool_size = 3, pool_type = "avg", pool_stride = 1, pool_padding = "VALID", data_format = "NCHW") """ if pool_type not in ["max", "avg"]: raise ValueError( "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", str(pool_type)) if global_pooling is False and pool_size == -1: raise ValueError( "When Attr(global_pooling) is False, Attr(pool_size) must be passed " "and be a valid value. Received pool_size: %s." % str(pool_size)) if not isinstance(use_cudnn, bool): raise TypeError("Attr(use_cudnn) should be True or False. Received " "Attr(use_cudnn): %s." % str(use_cudnn)) if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " "Attr(data_format): %s." 
% str(data_format)) pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') def update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True return False if is_list_or_tuple(padding) and len(padding) == 4: if is_list_or_tuple(padding[0]) and (data_format == "NCHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') if utils._is_symmetric_padding(padding, 2): padding = [padding[0], padding[2]] else: padding = utils.convert_to_list(padding, 2, 'padding') return padding padding_algorithm = "EXPLICIT" if isinstance(pool_padding, str): pool_padding = pool_padding.upper() if pool_padding not in ["SAME", "VALID"]: raise ValueError( "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." % str(pool_padding)) if pool_padding == "VALID": padding_algorithm = "VALID" pool_padding = [0, 0] if ceil_mode != False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. 
" "Received ceil_mode: True.") elif pool_padding == "SAME": padding_algorithm = "SAME" pool_padding = [0, 0] pool_padding = update_padding(pool_padding, data_format) op_type = 'pool2d' helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=op_type, inputs={"X": input}, outputs={"Out": pool_out}, attrs={ "pooling_type": pool_type, "ksize": pool_size, "global_pooling": global_pooling, "strides": pool_stride, "paddings": pool_padding, "padding_algorithm": padding_algorithm, "use_cudnn": use_cudnn, "ceil_mode": ceil_mode, "use_mkldnn": False, "exclusive": exclusive, "data_format": data_format, }) return pool_out @templatedoc() def pool3d(input, pool_size=-1, pool_type="max", pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCDHW"): """ ${comment} Args: input (Variable): The input tensor of pooling operator, which is a 5-D tensor with shape [N, C, D, H, W]. The format of input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is the number of channels, `D` is the depth of the feature, `H` is the height of the feature, and `W` is the width of the feature. pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three integers, (pool_size_Depth, pool_size_Height, pool_size_Width). Otherwise, the pool kernel size will be the cube of an int. pool_type (string): ${pooling_type_comment} pool_stride (string|int|list|tuple)): The pool padding. If `pool_padding` is a string, either 'VALID' or 'SAME' which is the padding algorithm. If pool stride size is a tuple or list, it must contain three integers, `[stride_Depth, stride_Height, stride_Width]`. Otherwise, the pool stride size will be a cube of an int. pool_padding (int|list|tuple): The pool padding size. 
If pool padding size is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`. when `data_format` is `"NDHWC"`, `pool_padding` can be in the form `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`. global_pooling (bool): ${global_pooling_comment} use_cudnn (bool): ${use_cudnn_comment} ceil_mode (bool): ${ceil_mode_comment} name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. exclusive (bool): Whether to exclude padding points in average pooling mode, default is true. data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`. The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. Returns: Variable: The output tensor of pooling result. The data type is same as input tensor. Raises: ValueError: If `pool_type` is not "max" nor "avg". ValueError: If `global_pooling` is False and `pool_size` is -1. TypeError: If `use_cudnn` is not a bool value. ValueError: If `data_format` is not "NCDHW" or "NDHWC". ValueError: If `pool_padding` is a string, but not "SAME" or "VALID". ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True. ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero. ShapeError: If the input is not a 4-D or 5-D Tensor. ShapeError: If the dimension of input minus the size of `pool_stride` is not 2. 
ShapeError: If the size of `pool_size` and `pool_stride` is not equal. ShapeError: If the output's shape calculated is not greater than 0. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32') # max pool3d pool3d = fluid.layers.pool3d( input = data, pool_size = 2, pool_type = "max", pool_stride = 1, global_pooling=False) # average pool3d pool3d = fluid.layers.pool3d( input = data, pool_size = 2, pool_type = "avg", pool_stride = 1, global_pooling=False) # global average pool3d pool3d = fluid.layers.pool3d( input = data, pool_size = 2, pool_type = "avg", pool_stride = 1, global_pooling=True) # example 1: # Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW". out_1 = fluid.layers.pool3d( input = data, pool_size = 2, pool_type = "avg", pool_stride = 1, pool_padding = [1, 2, 1, 0, 1, 2], global_pooling = False, data_format = "NCDHW") # example 2: # Attr(pool_padding) is a string, Attr(data_format) is "NCDHW". out_2 = fluid.layers.pool3d( input = data, pool_size = 3, pool_type = "avg", pool_stride = 1, pool_padding = "VALID", global_pooling = False, data_format = "NCDHW") """ if pool_type not in ["max", "avg"]: raise ValueError( "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", str(pool_type)) if global_pooling is False and pool_size == -1: raise ValueError( "When Attr(global_pooling) is False, Attr(pool_size) must be passed " "and be a valid value. Received Attr(pool_size): %s." % str(pool_size)) if not isinstance(use_cudnn, bool): raise TypeError("Attr(use_cudnn) should be True or False. Received " "Attr(use_cudnn): %s. " % str(use_cudnn)) if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. 
Received " "Attr(data_format): %s" % str(data_format)) pool_size = utils.convert_to_list(pool_size, 3, 'pool_size') pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride') def update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, (list, tuple)): return True return False if is_list_or_tuple(padding) and len(padding) == 5: if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') if utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] elif is_list_or_tuple(padding) and len(padding) == 6: padding = utils.convert_to_list(padding, 6, 'padding') if utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] else: padding = utils.convert_to_list(padding, 3, 'padding') return padding padding_algorithm = "EXPLICIT" if isinstance(pool_padding, str): pool_padding = pool_padding.upper() if pool_padding not in ["SAME", "VALID"]: raise ValueError( "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." % str(pool_padding)) if pool_padding == "VALID": padding_algorithm = "VALID" pool_padding = [0, 0, 0] if ceil_mode != False: raise ValueError( "When Attr(pool_padding) is \"VALID\", ceil_mode must be False. 
" "Received ceil_mode: True.") elif pool_padding == "SAME": padding_algorithm = "SAME" pool_padding = [0, 0, 0] pool_padding = update_padding(pool_padding, data_format) op_type = "pool3d" helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=op_type, inputs={"X": input}, outputs={"Out": pool_out}, attrs={ "pooling_type": pool_type, "ksize": pool_size, "global_pooling": global_pooling, "strides": pool_stride, "paddings": pool_padding, "padding_algorithm": padding_algorithm, "use_cudnn": use_cudnn, "ceil_mode": ceil_mode, "use_mkldnn": False, "exclusive": exclusive, "data_format": data_format, }) return pool_out @deprecated(since="2.0.0") @templatedoc(op_type="pool2d") def adaptive_pool2d(input, pool_size, pool_type="max", require_index=False, name=None): r""" This operation calculates the output based on the input, pool_size, pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. Parameters(pool_size) should contain two elements which represent height and width, respectively. Also the H and W dimensions of output(Out) is same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]] For average adaptive pool2d: .. math:: hstart &= floor(i * H_{in} / H_{out}) hend &= ceil((i + 1) * H_{in} / H_{out}) wstart &= floor(j * W_{in} / W_{out}) wend &= ceil((j + 1) * W_{in} / W_{out}) Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} Args: input (Tensor): The input tensor of pooling operator, which is a 4-D tensor with shape [N, C, H, W]. The format of input tensor is NCHW, where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. The data type is float32 or float64. 
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain two integers, (pool_size_Height, pool_size_Width). pool_type: ${pooling_type_comment} require_index (bool): If true, the index of max pooling point will be returned along with outputs. It cannot be set in average pooling type. Default False. name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: Tensor: The output tensor of adaptive pooling result. The data type is same as input tensor. Raises: ValueError: 'pool_type' is not 'max' nor 'avg'. ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'. ValueError: 'pool_size' should be a list or tuple with length as 2. Examples: .. code-block:: python # average adaptive pool2d # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n], # output shape is [N, C, m, n], adaptive pool divide H and W dimensions # of input data into m * n grids averagely and performs poolings in each # grid to get output. # adaptive average pool performs calculations as follow: # # for i in range(m): # for j in range(n): # hstart = floor(i * H / m) # hend = ceil((i + 1) * H / m) # wstart = floor(i * W / n) # wend = ceil((i + 1) * W / n) # output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend]) # import paddle paddle.enable_static() data = paddle.rand(shape=[1,3,32,32]) pool_out = paddle.fluid.layers.adaptive_pool2d( input=data, pool_size=[3, 3], pool_type='avg') # max adaptive pool2d # suppose input data in shape of [N, C, H, W], `pool_size` is [m, n], # output shape is [N, C, m, n], adaptive pool divide H and W dimensions # of input data into m * n grids averagely and performs poolings in each # grid to get output. 
# adaptive average pool performs calculations as follow: # # for i in range(m): # for j in range(n): # hstart = floor(i * H / m) # hend = ceil((i + 1) * H / m) # wstart = floor(i * W / n) # wend = ceil((i + 1) * W / n) # output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend]) # import paddle data = paddle.rand(shape=[1,3,32,32]) pool_out = paddle.fluid.layers.adaptive_pool2d( input=data, pool_size=[3, 3], pool_type='max') """ check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'adaptive_pool2d') check_type(pool_type, 'pool_type', str, 'adaptive_pool2d') check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d') check_type(require_index, 'require_index', bool, 'adaptive_pool2d') if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", str(pool_type)) if pool_type == "avg" and require_index: raise ValueError( "invalid setting 'require_index' true when 'pool_type' is 'avg'.") pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') if pool_type == "max": l_type = 'max_pool2d_with_index' else: l_type = "pool2d" helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": pool_out} if pool_type == "max": mask = helper.create_variable_for_type_inference(dtype) outputs["Mask"] = mask helper.append_op( type=l_type, inputs={"X": input}, outputs=outputs, attrs={ "pooling_type": pool_type, "ksize": pool_size, "adaptive": True, }) return (pool_out, mask) if require_index else pool_out @deprecated(since="2.0.0") @templatedoc(op_type="pool3d") def adaptive_pool3d(input, pool_size, pool_type="max", require_index=False, name=None): r""" This operation calculates the output based on the input, pool_size, pool_type parameters. 
Input(X) and output(Out) are in NCDHW format, where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, and W is the width of the feature. Parameters(pool_size) should contain three elements which represent height and width, respectively. Also the D, H and W dimensions of output(Out) is same as Parameter(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1], pool_size[2]] For average adaptive pool3d: .. math:: dstart &= floor(i * D_{in} / D_{out}) dend &= ceil((i + 1) * D_{in} / D_{out}) hstart &= floor(j * H_{in} / H_{out}) hend &= ceil((j + 1) * H_{in} / H_{out}) wstart &= floor(k * W_{in} / W_{out}) wend &= ceil((k + 1) * W_{in} / W_{out}) Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)} Args: input (Tensor): The input tensor of pooling operator, which is a 5-D tensor with shape [N, C, D, H, W]. The format of input tensor is NCDHW, where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, and W is the width of the feature. The data type is float32 or float64. pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, it must contain three integers, (Depth, Height, Width). pool_type: ${pooling_type_comment} require_index (bool): If true, the index of max pooling point will be returned along with outputs. It cannot be set in average pooling type. Default False. name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: Tensor: The output tensor of adaptive pooling result. The data type is same as input tensor. Raises: ValueError: 'pool_type' is not 'max' nor 'avg'. ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'. ValueError: 'pool_size' should be a list or tuple with length as 2. Examples: .. 
code-block:: python # average adaptive pool3d # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n], # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions # of input data into l * m * n grids averagely and performs poolings in each # grid to get output. # adaptive average pool performs calculations as follow: # # for i in range(l): # for j in range(m): # for k in range(n): # dstart = floor(i * D / l) # dend = ceil((i + 1) * D / l) # hstart = floor(j * H / m) # hend = ceil((j + 1) * H / m) # wstart = floor(k * W / n) # wend = ceil((k + 1) * W / n) # output[:, :, i, j, k] = # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) # import paddle paddle.enable_static() data = paddle.rand(shape=[1,3,32,32,32]) pool_out = paddle.fluid.layers.adaptive_pool3d( input=data, pool_size=[3, 3, 3], pool_type='avg') # max adaptive pool3d # suppose input data in shape of [N, C, D, H, W], `pool_size` is [l, m, n], # output shape is [N, C, l, m, n], adaptive pool divide D, H and W dimensions # of input data into l * m * n grids averagely and performs poolings in each # grid to get output. 
# adaptive average pool performs calculations as follow: # # for i in range(l): # for j in range(m): # for k in range(n): # dstart = floor(i * D / l) # dend = ceil((i + 1) * D / l) # hstart = floor(j * H / m) # hend = ceil((j + 1) * H / m) # wstart = floor(k * W / n) # wend = ceil((k + 1) * W / n) # output[:, :, i, j, k] = # avg(input[:, :, dstart:dend, hstart: hend, wstart: wend]) # import paddle data = paddle.rand(shape=[1,3,32,32,32]) pool_out = paddle.fluid.layers.adaptive_pool3d( input=data, pool_size=[3, 3, 3], pool_type='max') """ check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'adaptive_pool3d') check_type(pool_type, 'pool_type', str, 'adaptive_pool3d') check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d') check_type(require_index, 'require_index', bool, 'adaptive_pool3d') if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", str(pool_type)) if pool_type == "avg" and require_index: raise ValueError( "invalid setting 'require_index' true when 'pool_type' is 'avg'.") pool_size = utils.convert_to_list(pool_size, 3, 'pool_size') if pool_type == "max": l_type = 'max_pool3d_with_index' else: l_type = "pool3d" helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": pool_out} if pool_type == "max": mask = helper.create_variable_for_type_inference(dtype) outputs["Mask"] = mask helper.append_op( type=l_type, inputs={"X": input}, outputs=outputs, attrs={ "pooling_type": pool_type, "ksize": pool_size, "adaptive": True, }) return (pool_out, mask) if require_index else pool_out def batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=True, use_global_stats=False): r""" 
:api_attr: Static Graph **Batch Normalization Layer** Can be used as a normalizer function for convolution or fully_connected operations. The required data format for this layer is one of the following: 1. NHWC `[batch, in_height, in_width, in_channels]` 2. NCHW `[batch, in_channels, in_height, in_width]` Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_ for more details. :math:`input` is the input features over a mini-batch. .. math:: \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\ \ mini-batch\ mean \\\\ \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\ \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\ \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\ y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\\\ moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum) moving_mean is global mean and moving_var is global variance. When use_global_stats = True, the :math:`\\mu_{\\beta}` and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch. They are global (or running) statistics. (It usually got from the pre-trained model.) The training and testing (or inference) have the same behavior: .. math:: \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ \\sigma_{\\beta}^{2} + \\epsilon}} \\\\ y_i &\\gets \\gamma \\hat{x_i} + \\beta Note: if build_strategy.sync_batch_norm=True, the batch_norm in network will use sync_batch_norm automatically. `is_test = True` can only be used in test program and inference program, `is_test` CANNOT be set to True in train program, if you want to use global status from pre_train model in train program, please set `use_global_stats = True`. Args: input(Tensor): The rank of input Tensor can be 2, 3, 4, 5. 
The data type is float16 or float32 or float64. act(string, Default None): Activation type, linear|relu|prelu|... is_test (bool, Default False): A flag indicating whether it is in test phrase or not. momentum(float|Tensor, Default 0.9): The value used for the moving_mean and moving_var computation. This should be a float number or a Tensor with shape [1] and data type as float32. The updated formula is: :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` Default is 0.9. epsilon(float, Default 1e-05): A value added to the denominator for numerical stability. Default is 1e-5. param_attr(ParamAttr|None): The parameter attribute for Parameter `scale` of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm will create ParamAttr as param_attr, the name of scale can be set in ParamAttr. If the Initializer of the param_attr is not set, the parameter is initialized with Xavier. Default: None. bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. data_layout (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. in_place(bool, Default False): Make the input and output of batch norm reuse memory. name(str|None): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. 
If it is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm will save global mean with the string. moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance. If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm will save global variance with the string. do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model average when model average is enabled. use_global_stats(bool, Default False): Whether to use global mean and variance. In inference or test mode, set use_global_stats to true or is_test to true, and the behavior is equivalent. In train mode, when setting use_global_stats True, the global mean and variance are also used during train period. Returns: A Tensor which is the result after applying batch normalization on the input, has same shape and data type with input. Examples: .. code-block:: python import paddle paddle.enable_static() x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32') hidden1 = paddle.static.nn.fc(x=x, size=200) print(hidden1.shape) # [3, 200] hidden2 = paddle.static.nn.batch_norm(input=hidden1) print(hidden2.shape) # [3, 200] """ assert bias_attr is not False, "bias_attr should not be False in batch_norm." 
helper = LayerHelper('batch_norm', **locals()) check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], 'batch_norm') dtype = helper.input_dtype() # use fp32 for bn parameter if dtype == core.VarDesc.VarType.FP16: dtype = core.VarDesc.VarType.FP32 input_shape = input.shape if data_layout == 'NCHW': channel_num = input_shape[1] else: if data_layout == 'NHWC': channel_num = input_shape[-1] else: raise ValueError("unsupported data layout:" + data_layout) param_shape = [channel_num] # create parameter scale = helper.create_parameter( attr=helper.param_attr, shape=param_shape, dtype=dtype, default_initializer=Constant(1.0)) bias = helper.create_parameter( attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) mean = helper.create_parameter( attr=ParamAttr( name=moving_mean_name, initializer=Constant(0.0), trainable=False, do_model_average=do_model_average_for_mean_and_var), shape=param_shape, dtype=dtype) mean.stop_gradient = True variance = helper.create_parameter( attr=ParamAttr( name=moving_variance_name, initializer=Constant(1.0), trainable=False, do_model_average=do_model_average_for_mean_and_var), shape=param_shape, dtype=dtype) variance.stop_gradient = True # create output # mean and mean_out share the same memory mean_out = mean # variance and variance_out share the same memory variance_out = variance saved_mean = helper.create_variable_for_type_inference( dtype=dtype, stop_gradient=True) saved_variance = helper.create_variable_for_type_inference( dtype=dtype, stop_gradient=True) reserve_space = None if not is_test: reserve_space = helper.create_variable_for_type_inference( dtype=helper.input_dtype(), stop_gradient=True) batch_norm_out = input if in_place else \ helper.create_variable_for_type_inference(dtype) inputs = { "X": input, "Scale": scale, "Bias": bias, "Mean": mean, "Variance": variance } attrs = { "epsilon": epsilon, "is_test": is_test, "data_layout": data_layout, "use_mkldnn": False, "fuse_with_relu": False, 
        "use_global_stats": use_global_stats
    }
    # momentum is either a python float (static op attribute) or a 1-element
    # Variable (fed as an op input so it can vary at runtime).
    # NOTE(review): the input key 'MomemtumTensor' looks misspelled
    # ('MomentumTensor') - verify against the batch_norm op registration
    # before relying on a Variable momentum.
    if isinstance(momentum, Variable):
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum
    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space
    helper.append_op(
        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)

    return helper.append_activation(batch_norm_out)


def inplace_abn(input,
                act=None,
                is_test=False,
                momentum=0.9,
                epsilon=1e-05,
                param_attr=None,
                bias_attr=None,
                data_layout='NCHW',
                name=None,
                moving_mean_name=None,
                moving_variance_name=None,
                do_model_average_for_mean_and_var=True,
                use_global_stats=False,
                act_alpha=1.0):
    r"""
    **In-place Activation Batch Normalization Layer**

    This layer calculates batch normalization and activation with in-place memory.
    For batch normalization calculations, see `fluid.layers.batch_norm`.
    For in-place activation batch normalization, see `In-Place Activated BatchNorm for
    Memory-Optimized Training of DNNs <https://arxiv.org/abs/1712.02616>`_

    `inplace_abn` only support activation type as `None`, `identity`, `leaky_relu`,
    `elu` currently.
    `inplace_abn` only support data type as `float32`, `float64` currently.

    Note:
        if build_strategy.sync_batch_norm=True, the batch_norm in network will use
        sync_batch_norm automatically.
        `is_test = True` can only be used in test program and inference program, `is_test` CANNOT be set to True in train program, if you want to use global status from pre_train model in train program, please set `use_global_stats = True`.

    Args:
        input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
            is float16 or float32 or float64.
        act(string, Default None): Activation type, linear|relu|prelu|...
        is_test (bool, Default False): A flag indicating whether it is in
            test phrase or not.
        momentum(float|Variable, Default 0.9): The value used for the moving_mean and
            moving_var computation. This should be a float number or a Variable with
            shape [1] and data type as float32. The updated formula is:
            :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
            :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
            Default is 0.9.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
            of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn
            will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
            If the Initializer of the param_attr is not set, the parameter is initialized
            with Xavier. Default: None.
        bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn.
            If it is set to None or one attribute of ParamAttr, inplace_abn
            will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
            If the Initializer of the bias_attr is not set, the bias is initialized zero.
            Default: None.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
            Usually name is no need to set and None by default.
        moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
            is set to None, inplace_abn will save global mean with a random name, otherwise, inplace_abn
            will save global mean with the string.
        moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
            If it is set to None, inplace_abn will save global variance with a random name, otherwise, inplace_abn
            will save global variance with the string.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
            should do model average when model average is enabled.
        use_global_stats(bool, Default False): Whether to use global mean and
            variance. In inference or test mode, set use_global_stats to true
            or is_test to true, and the behavior is equivalent.
            In train mode, when setting use_global_stats True, the global mean
            and variance are also used during train period.
        act_alpha(float, Default 1.0): when activation is in ['elu', 'identity', 'leaky_relu'],
            inplace activative batch normalization will be used, and alpha parameter for activation
            can be given by this parameter.

    Returns:
        A Variable holding Tensor which is the result after applying batch normalization and activation on the input,
        has same shape and data type with input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.inplace_abn(input=hidden1)
            hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)

    """
    # Only these activations have fused in-place ABN kernels.
    assert act in [None, 'identity', 'leaky_relu', 'elu'], \
        "inplace_abn only support act as None, 'identity', " \
        "'leaky_relu', 'elu' currently"
    assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
    helper = LayerHelper('inplace_abn', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'inplace_abn')
    dtype = helper.input_dtype()

    input_shape = input.shape
    # Channel axis depends on layout: dim 1 for NCHW, last dim for NHWC.
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    # scale/bias/mean/variance are all per-channel vectors.
    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    # Moving statistics are persistable parameters but never trained directly;
    # the op updates them through MeanOut/VarianceOut.
    mean = helper.create_parameter(
        attr=ParamAttr(
            name=moving_mean_name,
            initializer=Constant(0.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    mean.stop_gradient = True
    variance = helper.create_parameter(
        attr=ParamAttr(
            name=moving_variance_name,
            initializer=Constant(1.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    variance.stop_gradient = True

    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance_out share the same memory
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    reserve_space = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    # In-place: the op's output Y aliases the input tensor.
    batch_norm_out = input

    inputs = {
        "X": input,
        "Scale": scale,
        "Bias": bias,
        "Mean": mean,
        "Variance": variance
    }
    attrs = {
        "epsilon": epsilon,
        "is_test": is_test,
        "data_layout": data_layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats,
        "activation": act,
        "alpha": act_alpha,
    }
    # NOTE(review): same suspicious 'MomemtumTensor' spelling as in
    # batch_norm above - verify against the op registration.
    if isinstance(momentum, Variable):
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum
    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space
    helper.append_op(
        type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)

    # Activation is fused into the op itself, so return the op output
    # directly instead of going through helper.append_activation.
    return batch_norm_out


def instance_norm(input,
                  epsilon=1e-05,
                  param_attr=None,
                  bias_attr=None,
                  name=None):
    r"""
    :api_attr: Static Graph

    **Instance Normalization Layer**

    Can be used as a normalizer function for convolution or
    fully_connected operations. The required data format for this layer is
    one of the following:

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`

    Refer to `Instance Normalization: The Missing Ingredient for
    Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
    for more details.

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
        \\ mean\ of\ one\  feature\ map\ in\ mini-batch \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Note:
        `H` means height of feature map, `W` means width of feature map.

    Args:
        input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
            The data type is float32 or float64.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None|bool, optional): The parameter attribute for Parameter `scale`
             of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
             If the Initializer of the param_attr is not set, the parameter is initialized
             with Xavier. If the param_attr is set to False, instance_norm will not create param_attr.
             Default: None.
        bias_attr(ParamAttr|None|bool, optional): The parameter attribute for the bias of instance_norm.
             If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
             If the Initializer of the bias_attr is not set, the bias is initialized zero.
             If the bias_attr is set to False, instance_norm will not create bias_attr.
             Default: None.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        A Tensor which is the result after applying instance normalization on the input,
        has same shape and data type with input.

    Examples:

        .. code-block:: python

            import paddle
            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = paddle.static.nn.fc(x, size=200)
            hidden2 = paddle.static.nn.instance_norm(hidden1)
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'instance_norm')
    # scale and bias must be disabled together or created together.
    # NOTE(review): "Fasle" in the message below is a typo for "False"; it is
    # a runtime string, left unchanged here.
    if param_attr is False:
        assert bias_attr is False, "param_attr and bias_attr must be set to Fasle at the same time in instance_norm"

    helper = LayerHelper('instance_norm', **locals())
    dtype = helper.input_dtype()

    # use fp32 for in parameter
    if dtype == core.VarDesc.VarType.FP16:
        dtype = core.VarDesc.VarType.FP32

    input_shape = input.shape
    if len(input.shape) < 2 or len(input.shape) > 5:
        raise ValueError(
            'expected 2D or 3D or 4D or 5D input (got {}D input, input shape is: {})'.
            format(len(input.shape), input_shape))

    # Channel axis is assumed to be dim 1 (NCHW only, per the docstring).
    channel_num = input_shape[1]

    param_shape = [channel_num]

    if param_attr != False and bias_attr != False:
        # create parameter
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        bias = helper.create_parameter(
            attr=helper.bias_attr,
            shape=param_shape,
            dtype=dtype,
            is_bias=True,
            default_initializer=Constant(0.0))

    # create output
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    instance_norm_out = helper.create_variable_for_type_inference(dtype)

    inputs = {"X": input}
    # Scale/Bias inputs are only wired in when both attrs are enabled.
    if param_attr != False and bias_attr != False:
        inputs["Scale"] = scale
        inputs["Bias"] = bias

    helper.append_op(
        type="instance_norm",
        inputs=inputs,
        outputs={
            "Y": instance_norm_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"epsilon": epsilon, })

    return instance_norm_out


@static_only
def data_norm(input,
              act=None,
              epsilon=1e-05,
              param_attr=None,
              data_layout='NCHW',
              in_place=False,
              name=None,
              moving_mean_name=None,
              moving_variance_name=None,
              do_model_average_for_mean_and_var=True,
              slot_dim=-1,
              sync_stats=False,
              summary_decay_rate=0.9999999,
              enable_scale_and_shift=False):
    r"""
    :api_attr: Static Graph

    **Data Normalization Layer**

    This op can be used as a normalizer function for conv2d and fully_connected operations.
    The required data format for this layer is one of the following:

    1. NHWC `[batch, in_height, in_width, in_channels]`

    2. NCHW `[batch, in_channels, in_height, in_width]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
        \ mini-batch\ mean \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Args:
        input(Tensor): The input Tensor.
        act(string, Default None): Activation type, linear|relu|prelu|...
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability.
        param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        in_place(bool, Default False): Make the input and output of batch norm reuse memory.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.
        moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
        moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
            should do model average when model average is enabled.
        slot_dim(int): The embedding dimension of one slot. Slot is a set of one specific feature. In pslib mode,
            we distinguish feature ids by slot and pull their embeddings from parameter server (pslib). The first
            place of the embedding is the historical show number (occurence time of this feature id with a label 0).
            If the input of this op is concated by slot-wise embeddings, and the show number is zero when this slot
            is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
            the show number and judge if the show number is zero. If so, we choose to skip normalization on this
            embedding.
        sync_stats(bool, Default False): When running with multiple GPU cards, using allreduce to sync the
            summary messages.
        summary_decay_rate(float, Default 0.9999999): The decay rate when updating summary.
        enable_scale_and_shift(bool, Default False): do scale&shift after normalization.

    Returns:
        Tensor: A tensor which is the result after applying data normalization on the input.

    Examples:

        .. code-block:: python

            import paddle
            paddle.enable_static()

            x = paddle.randn(shape=[32, 100])
            hidden2 = paddle.static.nn.data_norm(input=x)
    """
    helper = LayerHelper('data_norm', **locals())
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # Defaults for the running summary statistics; may be overridden through
    # a dict-valued param_attr below.
    batch_size_default = 1e4
    batch_sum_default = 0.0
    batch_square_sum_default = 1e4
    scale_w_default = 1.0
    bias_default = 0.0

    if param_attr and isinstance(param_attr, dict):
        batch_size_default = param_attr.get("batch_size", 1e4)
        batch_sum_default = param_attr.get("batch_sum", 0.0)
        batch_square_sum_default = param_attr.get("batch_square", 1e4)
        if enable_scale_and_shift:
            scale_w_default = param_attr.get("scale_w", 1.0)
            bias_default = param_attr.get("bias", 0.0)

    # create scale and shift(bias) when enable_scale_and_shift is True
    if name == None:
        name = "dn"
    if enable_scale_and_shift:
        scale_w = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.scale_w',
                initializer=Constant(value=float(scale_w_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
        bias = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.bias',
                initializer=Constant(value=float(bias_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
    # create parameter
    batch_size = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_size',
            initializer=Constant(value=float(batch_size_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_sum',
            initializer=Constant(value=float(batch_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_square_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_square_sum',
            initializer=Constant(value=float(batch_square_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    # Intermediate outputs of the data_norm op (not returned to the caller).
    means = helper.create_variable(dtype=dtype, stop_gradient=True)
    scales = helper.create_variable(dtype=dtype, stop_gradient=True)

    data_norm_out = input if in_place else helper.create_variable(dtype=dtype)

    inputs = {
        "X": input,
        "BatchSize": batch_size,
        "BatchSum": batch_sum,
        "BatchSquareSum": batch_square_sum
    }
    attrs = {
        "epsilon": epsilon,
        "data_layout": data_layout,
        "sync_stats": sync_stats,
        "summary_decay_rate": summary_decay_rate,
    }
    if slot_dim > 0:
        attrs["slot_dim"] = slot_dim
    if enable_scale_and_shift:
        attrs["enable_scale_and_shift"] = enable_scale_and_shift
    if enable_scale_and_shift:
        inputs["scale_w"] = scale_w
        inputs["bias"] = bias
    helper.append_op(
        type="data_norm",
        inputs=inputs,
        outputs={
            "Y": data_norm_out,
            "Means": means,
            "Scales": scales,
            "BatchSize": batch_size,
            "BatchSum": batch_sum,
            "BatchSquareSum": batch_square_sum
        },
        attrs=attrs)

    return helper.append_activation(data_norm_out)


@templatedoc()
def layer_norm(input,
               scale=True,
               shift=True,
               begin_norm_axis=1,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               name=None):
    r"""
    :api_attr: Static Graph

    **Layer Normalization Layer**

    The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i

        \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}

        y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layers
    - :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Args:
        input(Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
        scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
            normalization. Default: True.
        shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
            normalization. Default: True.
        begin_norm_axis(int, optional): The normalization will be performed along
            dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
            Default: 1.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        param_attr(ParamAttr, optional): The parameter attribute for the learnable
            gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
            omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
            a default :code:`ParamAttr` would be added as scale. The
            :attr:`param_attr` is initialized as 1 if it is added. Default: None.
        bias_attr(ParamAttr, optional): The parameter attribute for the learnable
            bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
            omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
            a default :code:`ParamAttr` would be added as bias. The
            :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
        act(str, optional): Activation to be applied to the output of layer
            normalization. Default: None.
        name(str): The default value is None. Normally there is no need for user
            to set this property. For more information, please refer to
            :ref:`api_guide_Name` .

    Returns:
        Tensor: ``Tensor`` indicating the normalized result, the data type is the
        same as ``input`` , and the return dimension is the same as ``input`` .

    Examples:

        .. code-block:: python

            import paddle
            paddle.enable_static()
            x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
            output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
            print(output.shape)  # [8, 32, 32]
    """
    assert in_dygraph_mode(
    ) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
    helper = LayerHelper('layer_norm', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'layer_norm')
    dtype = helper.input_dtype()

    # create input and parameters
    inputs = {'X': input}
    input_shape = input.shape
    # scale/bias have one element per normalized feature: the product of all
    # dims from begin_norm_axis onward.
    param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])]
    if scale:
        assert param_attr is not False, "param_attr should not be False when using scale."
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        inputs['Scale'] = scale
    else:
        if param_attr:
            warnings.warn("param_attr is only available with scale is True.")
    if shift:
        assert bias_attr is not False, "bias_attr should not be False when using shift."
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
        inputs['Bias'] = bias
    else:
        if bias_attr:
            warnings.warn("bias_attr is only available with shift is True.")

    # create output
    mean_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    layer_norm_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type="layer_norm",
        inputs=inputs,
        outputs={
            "Y": layer_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={"epsilon": epsilon,
               "begin_norm_axis": begin_norm_axis})

    return helper.append_activation(layer_norm_out)


@templatedoc()
def group_norm(input,
               groups,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               data_layout='NCHW',
               name=None):
    """
    :api_attr: Static Graph

    **Group Normalization Layer**

    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        input(Tensor): Tensor with dimension greater than 1, the data type is float32 or float64.
        groups(int): The number of groups that divided from channels, the data type
            is int32.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero, the data type is float32. Default: 1e-05.
        param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
            attribute. If a bool type, only False is supported, which means there is no weight parameter.
            Default: None, the default weight parameter attribute is used. For more information, please
            refer to :ref:`api_guide_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
            attribute. If a bool type, only False is supported, which means there is no bias parameter.
            Default: None, the default bias parameter attribute is used. For more information, please refer
            to :ref:`api_guide_ParamAttr` .
        act(str, optional): Activation to be applied to the output of group normalization.
        data_layout(str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, *]`.
        name (str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A Tensor has same data type and data format with `input`.

    Examples:
       .. code-block:: python

            import paddle
            paddle.enable_static()

            data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
            x = paddle.static.nn.group_norm(input=data, groups=4)
            print(x.shape) # [2, 8, 32, 32]
    """
    helper = LayerHelper('group_norm', **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'group_norm')
    # create input and parameters
    inputs = {'X': input}
    input_shape = input.shape
    if len(input_shape) < 2:
        raise ValueError(
            f"The dimensions of Op(fluid.layers.group_norm)'s input should be more than 1. But received {len(input_shape)}"
        )
    if data_layout != 'NCHW' and data_layout != 'NHWC':
        raise ValueError(
            "Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
            + data_layout + " but only NCHW or NHWC supported.")
    # Channel axis depends on layout: dim 1 for NCHW, last dim for NHWC.
    channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
    param_shape = [channel_num]
    if param_attr:
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        inputs['Scale'] = scale
    if bias_attr:
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
        inputs['Bias'] = bias

    # create output
    mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    group_norm_out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="group_norm",
        inputs=inputs,
        outputs={
            "Y": group_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={
            "epsilon": epsilon,
            "groups": groups,
            "data_layout": data_layout
        })

    return helper.append_activation(group_norm_out)


@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
    r"""
    :api_attr: Static Graph

    **Spectral Normalization Layer**

    This operation calculates the spectral normalization value of weight parameters of
    fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
    Parameters. Output tensor will be in same shape with input tensor.
    Calculations are showed as follows.

    Step 1:
    Generate vector U in shape of [H], and V in shape of [W].
    While H is the :attr:`dim` th dimension of the input weights,
    and W is the product result of remaining dimensions.

    Step 2:
    :attr:`power_iters` should be a positive integer, do following
    calculations with U and V for :attr:`power_iters` rounds. Calculations
    as follows:

    ..  math::

        \mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}

        \mathbf{u} := \\frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2}

    Step 3:
    Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.

    ..  math::

        \sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}

        \mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}

    Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .

    Args:
        weight(Tensor): ${weight_comment}
        dim(int): ${dim_comment}
        power_iters(int): ${power_iters_comment}
        eps(float): ${eps_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Tensor: A tensor of weight parameters after spectral normalization.
        The data type and shape is same as input tensor.

    Examples:
       .. code-block:: python

            import paddle

            paddle.enable_static()
            weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
            x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
            print(x.shape) # [2, 8, 32, 32]
    """
    helper = LayerHelper('spectral_norm', **locals())
    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                             'spectral_norm')
    check_type(dim, 'dim', int, 'spectral_norm')
    check_type(power_iters, 'power_iters', int, 'spectral_norm')
    check_type(eps, 'eps', float, 'spectral_norm')
    dtype = weight.dtype

    # create input and parameters
    inputs = {'Weight': weight}
    input_shape = weight.shape
    assert weight.numel() > 0, "Any dimension of input cannot be equal to 0."
assert dim < len(input_shape), ("The input `dim` should be less than the " "rank of `weight`, but received dim=" "{}".format(dim)) h = input_shape[dim] w = np.prod(input_shape) // h u = helper.create_parameter( attr=ParamAttr(), shape=[h], dtype=dtype, default_initializer=Normal(0., 1.)) u.stop_gradient = True inputs['U'] = u v = helper.create_parameter( attr=ParamAttr(), shape=[w], dtype=dtype, default_initializer=Normal(0., 1.)) inputs['V'] = v v.stop_gradient = True # create output out = helper.create_variable(dtype=dtype) helper.append_op( type="spectral_norm", inputs=inputs, outputs={"Out": out, }, attrs={ "dim": dim, "power_iters": power_iters, "eps": eps, }) return out def conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCHW'): r""" :api_attr: Static Graph The convolution2D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) are in NCHW or NHWC format. Where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. Parameters(dilations, strides, paddings) are two elements. These two elements represent height and width, respectively. The details of convolution transpose layer, please refer to the following explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_. If bias attribution and activation type are provided, bias is added to the output of the convolution, and the corresponding activation function is applied to the final result. For each input :math:`X`, the equation is: .. math:: Out = \sigma (W \\ast X + b) Where: * :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format. * :math:`W`: Filter value, a 4-D Tensor with MCHW format. * :math:`\\ast`: Convolution operation. * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1]. 
* :math:`\\sigma`: Activation function. * :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different. Example: - Input: Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)` - Output: Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where .. math:: H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\\\ W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1 \\\\ H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\\\ W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ] Note: The conv2d_transpose can be seen as the backward of the conv2d. For conv2d, when stride > 1, conv2d maps multiple input shape to the same output shape, so for conv2d_transpose, when stride > 1, input shape maps multiple output shape. If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}` and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`, conv2d_transpose can compute the kernel size automatically. Args: input(Tensor): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format, its data type is float32 or float64. num_filters(int): The number of the filter. It is as same as the output image channel. output_size(int|tuple, optional): The output image size. If output size is a tuple, it must contain two integers, (image_height, image_width). None if use filter_size, padding, and stride to calculate output_size. If output_size and filter_size are specified at the same time, They should follow the formula above. Default: None. output_size and filter_size should not be None at the same time. filter_size(int|tuple, optional): The filter size. 
            If filter_size is a tuple, it must contain two integers,
            (filter_size_height, filter_size_width). Otherwise, filter_size_height =
            filter_size_width = filter_size. None if use output size to calculate
            filter_size. Default: None. filter_size and output_size should not be
            None at the same time.
        stride(int|tuple, optional): The stride size. It means the stride in
            transposed convolution. If stride is a tuple, it must contain two
            integers, (stride_height, stride_width). Otherwise, stride_height =
            stride_width = stride. Default: stride = 1.
        padding(str|int|list|tuple, optional): The padding size. It means the number
            of zero-paddings on both sides for each dimension. If `padding` is a
            string, either 'VALID' or 'SAME' which is the padding algorithm. If
            `padding` is a tuple or list, it could be in three forms:
            `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and
            when `data_format` is `"NCHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation(int|tuple, optional): The dilation size. It means the spacing
            between the kernel points. If dilation is a tuple, it must contain two
            integers, (dilation_height, dilation_width). Otherwise, dilation_height =
            dilation_width = dilation. Default: dilation = 1.
        groups(int, optional): The groups number of the Conv2d transpose layer.
Inspired by grouped convolution in Alex Krizhevsky's Deep CNN paper, in which when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Default: groups = 1. param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose will create ParamAttr as param_attr. If the Initializer of the param_attr is not set, the parameter is initialized with Xavier. Default: None. bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose. If it is set to False, no bias will be added to the output units. If it is set to None or one attribute of ParamAttr, conv2d_transpose will create ParamAttr as bias_attr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True. act (str, optional): Activation type, if it is set to None, activation is not appended. Default: None. name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. Returns: A Tensor representing the conv2d_transpose, whose data type is the same with input and shape is (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels). 
If act is None, the tensor storing the transposed convolution result, and if act is not None, the tensor storing transposed convolution and non-linearity activation result. Raises: ValueError: If the type of `use_cudnn` is not bool. ValueError: If `data_format` is not "NCHW" or "NHWC". ValueError: If `padding` is a string, but not "SAME" or "VALID". ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 or the element corresponding to the input's channel is not 0. ValueError: If `output_size` and filter_size are None at the same time. ShapeError: If the input is not 4-D Tensor. ShapeError: If the input's dimension size and filter's dimension size not equal. ShapeError: If the dimension size of input minus the size of `stride` is not 2. ShapeError: If the number of input channels is not equal to filter's channels. ShapeError: If the size of `output_size` is not equal to that of `stride`. Examples: .. code-block:: python import paddle paddle.enable_static() data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32') conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3) print(conv2d_transpose.shape) # [-1, 2, 34, 34] """ assert param_attr is not False, "param_attr should not be False in conv2d_transpose." 
if len(input.shape) != 4: raise ValueError("Input size should be 4, " "but received {}".format(len(input.shape))) if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received " + data_format + " but only NCHW or NHWC supported.") input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1] op_type = 'conv2d_transpose' if (input_channel == groups and num_filters == input_channel and not use_cudnn): op_type = 'depthwise_conv2d_transpose' helper = LayerHelper(op_type, **locals()) if not isinstance(input, Variable): raise TypeError("Input of conv2d_transpose must be Variable") stride = utils.convert_to_list(stride, 2, 'stride') dilation = utils.convert_to_list(dilation, 2, 'dilation') if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") def _update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True return False if is_list_or_tuple(padding) and len(padding) == 4: if is_list_or_tuple(padding[0]) and (data_format == "NCHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." 
% str(padding)) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') else: padding = utils.convert_to_list(padding, 2, 'padding') padding = [padding[0], padding[0], padding[1], padding[1]] return padding padding_algorithm = "EXPLICIT" if isinstance(padding, str): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % str(padding)) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0, 0, 0] elif padding == "SAME": padding_algorithm = "SAME" padding = [0, 0, 0, 0] padding = _update_padding(padding, data_format) if filter_size is None: if output_size is None: raise ValueError("output_size must be set when filter_size is None") if isinstance(output_size, int): output_size = [output_size, output_size] h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1] w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2] filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] + padding[1] - 1) // dilation[0] + 1 filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] + padding[3] - 1) // dilation[1] + 1 filter_size = [filter_size_h, filter_size_w] else: filter_size = utils.convert_to_list(filter_size, 2, 'conv2d_transpose.filter_size') if len(padding) == 4 and utils._is_symmetric_padding(padding, 2): padding = [padding[0], padding[2]] if output_size is None: output_size = [] elif isinstance(output_size, (list, tuple, int)): output_size = utils.convert_to_list(output_size, 2, 'output_size') else: raise ValueError("output_size should be int, list[int] or tuple[int]") if groups is None: groups = 1 elif groups <= 0: raise ValueError("the groups of input must be greater than 0, " "but received the groups of input is {}".format( groups)) filter_shape = [input_channel, num_filters // groups] + filter_size img_filter = helper.create_parameter( 
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type=op_type, inputs={'Input': [input], 'Filter': [img_filter]}, outputs={'Output': pre_bias}, attrs={ 'output_size': output_size, 'strides': stride, 'paddings': padding, 'padding_algorithm': padding_algorithm, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, 'data_format': data_format }) if data_format == 'NCHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) else: pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4) out = helper.append_activation(pre_act) return out def conv3d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCDHW'): r""" :api_attr: Static Graph The convolution3D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, and W is the width of the feature. Parameters(dilations, strides, paddings) are two elements. These two elements represent height and width, respectively. The details of convolution transpose layer, please refer to the following explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_. If bias attribution and activation type are provided, bias is added to the output of the convolution, and the corresponding activation function is applied to the final result. For each input :math:`X`, the equation is: .. math:: Out = \sigma (W \ast X + b) In the above equation: * :math:`X`: Input value, a Tensor with NCDHW or NDHWC format. * :math:`W`: Filter value, a Tensor with MCDHW format. * :math:`\ast`: Convolution operation. 
    * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
    * :math:`\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

           D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
           H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
           W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\
           D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\\\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\\\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]

    Note:
          The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
          when stride > 1, conv3d maps multiple input shape to the same output shape,
          so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
          If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out},
          W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output size must
          between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`, the
          :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
          and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
          between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
          conv3d_transpose can compute the kernel size automatically.

    Args:
        input(Tensor): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
            of input is float32 or float64.
        num_filters(int): The number of the filter. It is as same as the output
            image channel.
        output_size(int|tuple, optional): The output image size.
If output size is a tuple, it must contain three integers, (image_depth, image_height, image_width). This parameter only works when filter_size is None. If output_size and filter_size are specified at the same time, They should follow the formula above. Default: None. Output_size and filter_size should not be None at the same time. filter_size(int|tuple, optional): The filter size. If filter_size is a tuple, it must contain three integers, (filter_size_depth, filter_size_height, filter_size_width). Otherwise, filter_size_depth = filter_size_height = \ filter_size_width = filter_size. None if use output size to calculate filter_size. Default: None. filter_size and output_size should not be None at the same time. padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string, either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding` is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `'NCDHW'`, `padding` can be in the form `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`. when `data_format` is `'NDHWC'`, `padding` can be in the form `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`. Default: padding = 0. stride(int|tuple, optional): The stride size. It means the stride in transposed convolution. If stride is a tuple, it must contain three integers, (stride_depth, stride_height, stride_width). Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1. dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points. 
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height, dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation. Default: dilation = 1. groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by grouped convolution in Alex Krizhevsky's Deep CNN paper, in which when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Default: groups=1 param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose will create ParamAttr as param_attr. If the Initializer of the param_attr is not set, the parameter is initialized with Xavier. Default: None. bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose. If it is set to False, no bias will be added to the output units. If it is set to None or one attribute of ParamAttr, conv3d_transpose will create ParamAttr as bias_attr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True act (str, optional): Activation type, if it is set to None, activation is not appended. Default: None. name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. 
Returns: A Variable holding Tensor representing the conv3d_transpose, whose data type is the same with input and shape is (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor variable storing the transposed convolution result, and if act is not None, the tensor variable storing transposed convolution and non-linearity activation result. Raises: ValueError: If the type of `use_cudnn` is not bool. ValueError: If `data_format` is not "NCDHW" or "NDHWC". ValueError: If `padding` is a string, but not "SAME" or "VALID". ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0 or the element corresponding to the input's channel is not 0. ValueError: If `output_size` and filter_size are None at the same time. ShapeError: If the input is not 5-D Tensor. ShapeError: If the input's dimension size and filter's dimension size not equal. ShapeError: If the dimension size of input minus the size of `stride` is not 2. ShapeError: If the number of input channels is not equal to filter's channels. ShapeError: If the size of `output_size` is not equal to that of `stride`. Examples: .. code-block:: python import paddle import numpy as np paddle.enable_static() data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32') param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001) res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) place = paddle.CPUPlace() exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) x = np.random.rand(1, 3, 12, 32, 32).astype("float32") output = exe.run(feed={"data": x}, fetch_list=[res]) print(output) """ assert param_attr is not False, "param_attr should not be False in conv3d_transpose." 
if data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received " + data_format + " but only NCDHW or NDHWC supported.") l_type = "conv3d_transpose" helper = LayerHelper(l_type, **locals()) if not isinstance(input, Variable): raise TypeError("Input of conv3d_transpose must be Variable") if len(input.shape) != 5: raise ValueError( "Input should be 5D tensor, but received input with the shape of {}". format(input.shape)) input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[ -1] stride = utils.convert_to_list(stride, 3, 'stride') dilation = utils.convert_to_list(dilation, 3, 'dilation') if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") def _update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True return False if is_list_or_tuple(padding) and len(padding) == 5: if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." 
% str(padding)) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') elif is_list_or_tuple(padding) and len(padding) == 6: padding = utils.convert_to_list(padding, 6, 'padding') else: padding = utils.convert_to_list(padding, 3, 'padding') padding = [ padding[0], padding[0], padding[1], padding[1], padding[2], padding[2] ] return padding padding_algorithm = "EXPLICIT" if isinstance(padding, str): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % str(padding)) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0, 0, 0, 0, 0] elif padding == "SAME": padding_algorithm = "SAME" padding = [0, 0, 0, 0, 0, 0] padding = _update_padding(padding, data_format) if filter_size is None: if output_size is None: raise ValueError("output_size must be set when filter_size is None") if isinstance(output_size, int): output_size = [output_size, output_size, output_size] d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1] h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2] w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3] filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] + padding[1] - 1) // dilation[0] + 1 filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] + padding[3] - 1) // dilation[1] + 1 filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] + padding[5] - 1) // dilation[2] + 1 filter_size = [filter_size_d, filter_size_h, filter_size_w] else: filter_size = utils.convert_to_list(filter_size, 3, 'conv3d_transpose.filter_size') if len(padding) == 6 and utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] if output_size is None: output_size = [] elif isinstance(output_size, (list, tuple, int)): output_size = utils.convert_to_list(output_size, 3, 'output_size') else: 
raise ValueError("output_size should be int, list[int] or tuple[int]") groups = 1 if groups is None else groups if groups <= 0: raise ValueError( "the groups of conv3d_transpose should be greater than 0. Received groups: {}". format(groups)) if num_filters % groups != 0: raise ValueError("Attr(num_filters) must be divisible by groups," "Received: Attr(num_filters) is {}, the groups is {}". format(num_filters, groups)) filter_shape = [input_channel, num_filters // groups] + filter_size img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) if data_format == 'NCDHW': data_format = 'NCHW' if data_format == 'NDHWC': data_format = 'NHWC' pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type=l_type, inputs={'Input': [input], 'Filter': [img_filter]}, outputs={'Output': pre_bias}, attrs={ 'output_size': output_size, 'strides': stride, 'paddings': padding, 'padding_algorithm': padding_algorithm, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, 'data_format': data_format }) if data_format == 'NCHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) else: pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5) out = helper.append_activation(pre_act) return out def reduce_sum(input, dim=None, keep_dim=False, name=None): """ Computes the sum of tensor elements over the given dimension. Args: input (Variable): The input variable which is a Tensor, the data type is float32, float64, int32, int64. dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`, sum all elements of :attr:`input` and return a Tensor variable with a single element, otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true, default value is False. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Variable: Tensor, results of summation operation on the specified dim of input tensor, it's data type is the same as input's Tensor. Raises: TypeError, if out data type is different with the input data type. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. x = fluid.data(name='x', shape=[2, 4], dtype='float32') fluid.layers.reduce_sum(x) # [3.5] fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6] fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]] # y is a Tensor variable with shape [2, 2, 2] and elements as below: # [[[1, 2], [3, 4]], # [[5, 6], [7, 8]]] # Each example is followed by the corresponding output tensor. 
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
            fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]

    """
    # Normalize a scalar axis to the list form expected by the reduce_sum op.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]

    if in_dygraph_mode():
        # Dynamic-graph fast path: invoke the C++ op directly.
        # reduce_all is True when every axis is reduced, i.e. dim is
        # None/empty or names all dimensions of the input.
        reduce_all = True if dim == None or dim == [] or len(dim) == len(
            input.shape) else False
        dim = dim if dim != None and dim != [] else [0]
        return _C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                 'reduce_all', reduce_all)
    # Static-graph path: validate the dtype and append a reduce_sum op to the
    # current program.  'dim' defaults to [0] when reduce_all is True (the op
    # ignores it in that case).
    attrs = {
        'dim': dim if dim != None and dim != [] else [0],
        'keep_dim': keep_dim,
        'reduce_all': True
        if dim == None or dim == [] or len(dim) == len(input.shape) else False
    }
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'reduce_sum')
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out


@deprecated(since="2.0.0", update_to="paddle.mean")
def reduce_mean(input, dim=None, keep_dim=False, name=None):
    """
    Computes the mean of the input tensor's elements along the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is
            float32, float64, int32, int64.
        dim (list|int, optional): The dimension along which the mean is computed. If
            `None`, compute the mean over all elements of :attr:`input`
            and return a variable with a single element, otherwise it
            must be in the range :math:`[-rank(input), rank(input))`. If
            :math:`dim[i] < 0`, the dimension to reduce is
            :math:`rank(input) + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property.
For more information, please refer to :ref:`api_guide_Name` Returns: Variable: Tensor, results of average on the specified dim of input tensor, it's data type is the same as input's Tensor. Raises: TypeError, if out data type is different with the input data type. Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. x = fluid.data(name='x', shape=[2, 4], dtype='float32') fluid.layers.reduce_mean(x) # [0.4375] fluid.layers.reduce_mean(x, dim=0) # [0.15, 0.25, 0.55, 0.8] fluid.layers.reduce_mean(x, dim=-1) # [0.475, 0.4] fluid.layers.reduce_mean(x, dim=1, keep_dim=True) # [[0.475], [0.4]] # y is a Tensor variable with shape [2, 2, 2] and elements as below: # [[[1.0, 2.0], [3.0, 4.0]], # [[5.0, 6.0], [7.0, 8.0]]] # Each example is followed by the corresponding output tensor. y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5] fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0] """ return paddle.mean(x=input, axis=dim, keepdim=keep_dim, name=name) def reduce_max(input, dim=None, keep_dim=False, name=None): """ Computes the maximum of tensor elements over the given dimension. Args: input (Variable): The input variable which is a Tensor, the data type is float32, float64, int32, int64. dim (list|int, optional): The dimension along which the maximum is computed. If :attr:`None`, compute the maximum over all elements of :attr:`input` and return a Tensor variable with a single element, otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
            The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None. Normally there is no need for
            user to set this property.
            For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of maximum on the specified dim of input tensor,
        it's data type is the same as input's Tensor.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_max(x)  # [0.9]
            fluid.layers.reduce_max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
            fluid.layers.reduce_max(x, dim=-1)  # [0.9, 0.7]
            fluid.layers.reduce_max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0]
            fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0]
    """
    helper = LayerHelper('reduce_max', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Normalize a scalar axis to the list form expected by the reduce_max op.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    # reduce_all is True when every axis is reduced, i.e. dim is None/empty or
    # names all dimensions of the input; 'dim' then defaults to [0] (ignored
    # by the op in that case).
    helper.append_op(
        type='reduce_max',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim != None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True
            if dim == None or dim == [] or len(dim) == len(input.shape) else False
        })
    return out


def reduce_min(input, dim=None, keep_dim=False, name=None):
    """
    Computes the minimum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is
            float32, float64, int32, int64.
dim (list|int, optional): The dimensions along which the minimum is computed. If :attr:`None`, compute the minimum over all elements of :attr:`input` and return a Tensor variable with a single element, otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true, default value is False. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Variable: Tensor, result of minimum on the specified dim of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. x = fluid.data(name='x', shape=[2, 4], dtype='float32') fluid.layers.reduce_min(x) # [0.1] fluid.layers.reduce_min(x, dim=0) # [0.1, 0.2, 0.5, 0.7] fluid.layers.reduce_min(x, dim=-1) # [0.2, 0.1] fluid.layers.reduce_min(x, dim=1, keep_dim=True) # [[0.2], [0.1]] # y is a Tensor variable with shape [2, 2, 2] and elements as below: # [[[1.0, 2.0], [3.0, 4.0]], # [[5.0, 6.0], [7.0, 8.0]]] # Each example is followed by the corresponding output tensor. 
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0] fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0] """ helper = LayerHelper('reduce_min', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] helper.append_op( type='reduce_min', inputs={'X': input}, outputs={'Out': out}, attrs={ 'dim': dim if dim != None and dim != [] else [0], 'keep_dim': keep_dim, 'reduce_all': True if dim == None or dim == [] or len(dim) == len(input.shape) else False }) return out def reduce_prod(input, dim=None, keep_dim=False, name=None): """ Computes the product of tensor elements over the given dimension. Args: input (Variable): The input variable which is a Tensor, the data type is float32, float64, int32, int64. dim (int|list|tuple, optional): The dimensions along which the product is performed. If :attr:`None`, multiply all elements of :attr:`input` and return a Tensor variable with a single element, otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. keep_dim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true, default value is False. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Variable: Tensor, result of product on the specified dim of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. 
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_prod(x)  # [0.0002268]
            fluid.layers.reduce_prod(x, dim=0)  # [0.02, 0.06, 0.3, 0.63]
            fluid.layers.reduce_prod(x, dim=-1)  # [0.027, 0.0084]
            fluid.layers.reduce_prod(x, dim=1, keep_dim=True)  # [[0.027], [0.0084]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0]
            fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
    """
    helper = LayerHelper('reduce_prod', **locals())
    # Accept int/list/tuple for `dim`; anything else is rejected explicitly.
    if dim is not None and not isinstance(dim, list):
        if isinstance(dim, tuple):
            dim = list(dim)
        elif isinstance(dim, int):
            dim = [dim]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".
                format(type(dim)))
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod')
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_prod',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            # When `dim` is None/empty the axes are ignored by the op because
            # `reduce_all` is set; [0] is only a placeholder.
            # NOTE(review): `dim != None` / `dim == None` should idiomatically
            # be `is not None` / `is None` (same result here; style only).
            'dim': dim if dim != None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out


def reduce_all(input, dim=None, keep_dim=False, name=None):
    """
    This OP computes the ``logical and`` of tensor elements over the given dimension, and output the result.

    Args:
        input (Tensor): the input tensor, it's data type should be `bool`.
        dim (list|int|optional): The dimension along which the logical and is computed.
            If :attr:`None`, compute the logical and over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically. The default value is None.

    Returns:
        Tensor, the output data type is bool. : The reduced tensor variable with ``logical and`` in given dims.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [True, True]]
            x = fluid.layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
            x = fluid.layers.cast(x, 'bool')

            out = fluid.layers.reduce_all(x)  # False
            out = fluid.layers.reduce_all(x, dim=0)  # [True, False]
            out = fluid.layers.reduce_all(x, dim=-1)  # [False, True]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

            out = fluid.layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

    """
    # NOTE(review): ('bool') is just the string 'bool', not a tuple; it works
    # because check_variable_and_dtype accepts a bare string as well.
    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
    helper = LayerHelper('reduce_all', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    # Normalize a scalar `dim` into a one-element list for the op attribute.
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_all',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim != None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out


def reduce_any(input, dim=None, keep_dim=False, name=None):
    """
    This OP computes the ``logical or`` of tensor elements over the given dimension, and output the result.

    Args:
        input (Tensor): the input tensor, it's data type should be `bool`.
        dim (list|int|optional): The dimension along which the logical and is computed.
    into multiple sub-Tensors.

    Args:
        input (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
        num_or_sections (int|list|tuple): If ``num_or_sections`` is int, then the ``num_or_sections``
            indicates the number of equal sized sub-Tensors that the ``input``
            will be divided into. If ``num_or_sections`` is a list or tuple, the length of it
            indicates the number of sub-Tensors and the elements in it indicate the sizes of sub-Tensors'
            dimension orderly. The length of the list mustn't be larger than the ``input`` 's size of specified dim.
        dim (int|Tensor, optional): The dimension along which to split, it can be a scalar with type ``int`` or
            a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``. If :math:`dim < 0`,
            the dimension to split along is :math:`rank(input) + dim`. Default is -1.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        list(Tensor): The list of segmented Tensors.

    Example:
        .. code-block:: python

            import paddle.fluid as fluid

            # input is a Tensor which shape is [3, 9, 5]
            input = fluid.data(
                 name="input", shape=[3, 9, 5], dtype="float32")

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            # dim is negative, the real dim is (rank(input) + axis) which real
            # value is 1.
            out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]

    """
    if in_dygraph_mode():
        # Dygraph fast path: fold axis/num/sections into op attrs and call
        # the C++ op directly.
        num = None
        attrs = ()

        if isinstance(dim, Variable):
            # A Tensor axis is materialized to a python int in dygraph mode.
            dim = dim.numpy()
            dim = dim.item(0)
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input.shape) + dim) if dim < 0 else dim
        attrs += ('axis', dim)

        if isinstance(num_or_sections, int):
            num = num_or_sections
            attrs += ('num', num_or_sections)
        elif isinstance(num_or_sections, (list, tuple)):
            num = len(num_or_sections)
            if utils._contain_var(num_or_sections):
                # Tensor section sizes are converted to ints in place.
                for index, item in enumerate(num_or_sections):
                    if isinstance(item, Variable):
                        num_or_sections[index] = num_or_sections[index].numpy()[
                            0]
                attrs += ('sections', list(num_or_sections))
            else:
                attrs += ('sections', list(num_or_sections))
        else:
            raise TypeError(
                "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                "received %s." % (type(num_or_sections)))
        return _C_ops.split(input, num, *attrs)

    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'split')
    check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
    check_type(dim, 'dim', (int, Variable), 'split')
    if isinstance(dim, Variable):
        check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')

    helper = LayerHelper('split', **locals())

    input_shape = input.shape
    inputs = {'X': input}
    attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}

    def _get_SectionsTensorList(one_list):
        # Convert a mixed list of ints/Variables into a list of 1-D int32
        # tensors; at most one int entry may be -1 (size inferred by the op).
        tensor_list = []
        unk_dim_idx = -1
        for idx, dim_size in enumerate(one_list):
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                tensor_list.append(dim_size)
            else:
                assert (isinstance(dim_size, int))
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one value of 'num_or_section' in split can "
                        "be -1. But received num_or_section[%d] is also -1."
                        % idx)
                    unk_dim_idx = idx
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                tensor_list.append(temp_out)
        return tensor_list

    if isinstance(dim, Variable):
        dim.stop_gradient = True
        inputs['AxisTensor'] = dim
    else:
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input_shape) + dim) if dim < 0 else dim
        attrs['axis'] = dim

    if isinstance(num_or_sections, int):
        assert num_or_sections > 1, 'num_or_sections must be more than 1.'
        if isinstance(dim, int) and input_shape[dim] > 0:
            # NOTE(review): the two %d operands appear swapped — the message
            # prints num_or_sections first and the input size second, reading
            # "num is not divisible by size"; verify the intended order.
            assert input_shape[dim] % num_or_sections ==0, \
                "The input's size along the split dimension " \
                "must be evenly divisible by Attr(num_or_sections). " \
                "But %d is not evenly divisible by %d. " % (num_or_sections,input_shape[dim])
        num = num_or_sections
    else:
        if isinstance(dim, int) and input_shape[dim] > 0:
            assert len(num_or_sections) <= input_shape[
                dim], 'len(num_or_sections) must not be more than input.shape[dim].'
        num = len(num_or_sections)
        # Tensor section entries become -1 in the attr; their real values are
        # supplied at runtime via SectionsTensorList below.
        attrs['sections'] = list(
            map(lambda ele: -1 if isinstance(ele, Variable) else ele,
                num_or_sections))
        if utils._contain_var(num_or_sections):
            inputs['SectionsTensorList'] = _get_SectionsTensorList(
                num_or_sections)

    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]
    helper.append_op(
        type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
    return outs


def l2_normalize(x, axis, epsilon=1e-12, name=None):
    r"""

    This op normalizes `x` along dimension `axis` using an L2
    norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes

    .. math::

        y = \\frac{x}{ \sqrt{\sum {x^2} + epsion }}

    For `x` with more dimensions, this layer independently normalizes each 1-D
    slice along dimension `axis`.

    Args:
        x(Variable|list): The input tensor could be N-D tensor, and the input data type could be float16, float32 or float64.
        axis(int): The axis on which to apply normalization.
            If `axis < 0`, the dimension to normalization is rank(X) + axis. -1 is
            the last dimension.
        epsilon(float): The epsilon value is used to avoid division by zero, the default value is 1e-12.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The output has the same shape and data type with `x`.

    Examples:

        .. code-block:: python
            :name: code-example1

            import paddle

            X = paddle.randn(shape=[3, 5], dtype='float64')
            out = paddle.fluid.layers.l2_normalize(X, axis=-1)
            print(out)

            # [[ 0.21558504  0.56360189  0.47466096  0.46269539 -0.44326736]
            #  [-0.70602414 -0.52745777  0.37771788 -0.2804768  -0.04449922]
            #  [-0.33972208 -0.43014923  0.31772556  0.76617881 -0.10761525]]

    """
    # 1-D inputs are always normalized along their only axis.
    if len(x.shape) == 1:
        axis = 0
    if in_dygraph_mode():
        _, out = _C_ops.norm(x, 'axis', 1
                             if axis is None else axis, 'epsilon', epsilon)
        return out

    check_variable_and_dtype(x, "X", ("float16", "float32", "float64"), "norm")

    helper = LayerHelper("l2_normalize", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    # The op also emits the per-slice norm as a second output; it is created
    # here but not returned.
    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="norm",
        inputs={"X": x},
        outputs={"Out": out,
                 "Norm": norm},
        attrs={
            "axis": 1 if axis is None else axis,
            "epsilon": epsilon,
        })
    return out


@deprecated(since="2.0.0", update_to="paddle.matmul")
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
    """
    Applies matrix multiplication to two tensors.

    Currently, the input tensors' rank can be any, but when the rank of any
    inputs is bigger than 3, this two inputs' rank should be equal.

    The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
    flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:

    - If a transpose flag is specified, the last two dimensions of the tensor
      are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
      :math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
      :math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
      opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
      :math:`[1, D]` in transposed form.

    - After transpose, the two tensors are 2-D or n-D and matrix multiplication
      performs in the following way.

      - If both are 2-D, they are multiplied like conventional matrices.
      - If either is n-D, it is treated as a stack of matrices residing in the
        last two dimensions and a batched matrix multiply supporting broadcast
        applies on the two tensors.

    Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a Tensor or LoDTensor.
        transpose_x (bool): Whether to transpose :math:`x` before multiplication.
        transpose_y (bool): Whether to transpose :math:`y` before multiplication.
        alpha (float): The scale of output. Default 1.0.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            # Examples to clarify shapes of the inputs and output
            # x: [B, ..., M, K], y: [B, ..., K, N]
            # fluid.layers.matmul(x, y)  # out: [B, ..., M, N]

            # x: [B, M, K], y: [B, K, N]
            # fluid.layers.matmul(x, y)  # out: [B, M, N]

            # x: [B, M, K], y: [K, N]
            # fluid.layers.matmul(x, y)  # out: [B, M, N]

            # x: [M, K], y: [K, N]
            # fluid.layers.matmul(x, y)  # out: [M, N]

            # x: [B, M, K], y: [K]
            # fluid.layers.matmul(x, y)  # out: [B, M]

            # x: [K], y: [K]
            # fluid.layers.matmul(x, y)  # out: [1]

            # x: [M], y: [N]
            # fluid.layers.matmul(x, y, True, True)  # out: [M, N]

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
            y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
            out = fluid.layers.matmul(x, y, True, True)

    """
    if in_dygraph_mode():
        out = _varbase_creator(dtype=x.dtype)
        _C_ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
                      transpose_y, 'alpha', float(alpha))
        return out

    def __check_input(x, y):
        # Static-graph validation: checks dtypes and that the contracted
        # (inner) dimensions agree after the optional transposes; also checks
        # that leading batch dimensions match for >2-D inputs.
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(
                val, name, ['float16', 'float32', 'float64'], 'matmul')
        x_shape = list(x.shape)
        y_shape = list(y.shape)
        # Rank-1 inputs are promoted to matrices: x -> [1, D], y -> [D, 1].
        if len(x_shape) == 1:
            x_shape = [1] + x_shape
        if len(y_shape) == 1:
            y_shape = y_shape + [1]

        # check the inner 2 dimensions
        if transpose_x:
            x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
        if transpose_y:
            y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
        if x_shape[-1] != y_shape[-2]:
            # -1 means an unknown (dynamic) dimension, which cannot be checked
            # at graph-build time.
            assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
                "After performing an optional transpose, Input X's width should be " \
                "equal to Y's width for multiplication " \
                "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
                (x_shape, y_shape)

        if len(y_shape) > 2 and len(x_shape) > 2:
            for i, dim_x in enumerate(x_shape[:-2]):
                # don't check neg shape
                if dim_x < 0 or y_shape[i] < 0:
                    continue
                if dim_x != y_shape[i]:
                    raise ValueError(
                        "When the matrix is larger than 2 dimensions, the higher "
                        "dimensional values of the two matrices need to be equal. "
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))

    attrs = {
        'transpose_X': transpose_x,
        'transpose_Y': transpose_y,
        'alpha': float(alpha),
    }

    __check_input(x, y)

    helper = LayerHelper('matmul', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out


def topk(input, k, name=None):
    """
    :alias_main: paddle.topk
    :alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk
    :old_api: paddle.fluid.layers.topk

    This OP is used to find values and indices of the k largest entries
    for the last dimension.

    If the input is a 1-D Tensor, finds the k largest entries and outputs
    their values and indices.

    If the input is a Tensor with higher rank, this operator computes the top k
    entries along the last dimension.

    .. code-block:: text

        Case 1:

          Input:
            input.shape = [3, 4]
            input.data = [[5, 4, 2, 3],
                          [9, 7, 10, 25],
                          [6, 2, 10, 1]]
            k = 2

          Output:
            The first output:
            values.shape = [3, 2]
            values.data = [[5, 4],
                           [10, 25],
                           [6, 10]]

            The second output:
            indices.shape = [3, 2]
            indices.data = [[0, 1],
                            [2, 3],
                            [0, 2]]

    Args:
        input(Variable): The input tensor. Support data types: float32, float64.
        k(int | Variable): The number of top elements to look for along the last dimension
                           of input tensor.
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.

    Returns:
        Values (Variable): Input tensor's k largest elements along each last dimensional slice. The dimension is: :math:`input.shape[:-1]+[k]`.
        Indices (Variable): Indices of k largest elements alone the last dimension of input. The dimension is same as values.

    Raises:
        ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            # set batch size=None
            input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
            top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape[None, 13, 5], top5_indices.shape=[None, 13, 5]

            # 1D Tensor
            input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
            top5_values, top5_indices = layers.topk(input1, k=5) #top5_values.shape=[None, 5], top5_indices.shape=[None, 5]

            # k=Variable
            input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
            vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
            vk_values, vk_indices = layers.topk(input2, k=vk) #vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]

    """
    if in_dygraph_mode():
        # A Tensor `k` is materialized to a python int before calling the op.
        _k = k.numpy().item(0) if isinstance(k, Variable) else k
        out, indices = _C_ops.top_k(input, 'k', _k)
        out.stop_gradient = True
        indices.stop_gradient = True
        return out, indices

    inputs = {"X": [input]}
    attrs = {}
    # `k` goes in as a runtime input when it is a Tensor, as an attr otherwise.
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}

    helper = LayerHelper("top_k", **locals())
    values = helper.create_variable_for_type_inference(dtype=input.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

    helper.append_op(
        type="top_k",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    # Backpropagation is cut at both outputs.
    values.stop_gradient = True
    indices.stop_gradient = True
    return values, indices


def ctc_greedy_decoder(input,
                       blank,
                       input_length=None,
                       padding_value=0,
                       name=None):
    r"""
    This op is used to decode sequences by greedy policy by the following steps:

    1. Get the indexes of maximum value for each row in input. a.k.a.
       numpy.argmax(input, axis=0).
    2. For each sequence in result of step1, merge repeated tokens between two
       blanks and delete all blanks.

    This op is implemented in two modes: lod and padding, either of them can be used.
    The input can be either LoDTensor or Tensor, corresponding to lod and padding
    mode respectively.

    A simple example as below:

    .. code-block:: text

        Given:
        (1) for lod mode:

        input.data = [[0.6, 0.1, 0.3, 0.1],
                      [0.3, 0.2, 0.4, 0.1],
                      [0.1, 0.5, 0.1, 0.3],
                      [0.5, 0.1, 0.3, 0.1],

                      [0.5, 0.1, 0.3, 0.1],
                      [0.2, 0.2, 0.2, 0.4],
                      [0.2, 0.2, 0.1, 0.5],
                      [0.5, 0.1, 0.3, 0.1]]

        input.lod = [[4, 4]]

        Computation:

        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
               [[0], [2], [1], [0]]
        step2: merge repeated tokens and remove blank which is 0. Then we get first output sequence:
               [[2], [1]]

        Finally:

        output.data = [[2],
                       [1],
                       [3]]

        output.lod = [[2, 1]]

        (2) for padding mode:

         input.data = [[[0.6, 0.1, 0.3, 0.1],
                        [0.3, 0.2, 0.4, 0.1],
                        [0.1, 0.5, 0.1, 0.3],
                        [0.5, 0.1, 0.3, 0.1]],

                       [[0.5, 0.1, 0.3, 0.1],
                        [0.2, 0.2, 0.2, 0.4],
                        [0.2, 0.2, 0.1, 0.5],
                        [0.5, 0.1, 0.3, 0.1]]]

        input_length.data = [[4], [4]]
        input.shape = [2, 4, 4]

        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
               [[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
        step2: Change the argmax result to use padding mode, then argmax result is
               [[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
        step3: Apply ctc_align to padding argmax result, padding_value is 0

        Finally:
        output.data = [[2, 1, 0, 0],
                       [3, 0, 0, 0]]
        output_length.data = [[2], [1]]

    Parameters:

        input(Variable): the probabilities of variable-length sequences. When in lod mode,
                         it is a 2-D LoDTensor with LoD information. It's shape is [Lp, num_classes + 1]
                         where Lp is the sum of all input sequences' length and
                         num_classes is the true number of classes. When in padding mode,
                         it is a 3-D Tensor with padding, It's shape is [batch_size, N, num_classes + 1].
                         (not including the blank label). The data type can be float32 or float64.
        blank(int): the blank label index of Connectionist Temporal
                    Classification (CTC) loss, which is in the half-opened
                    interval [0, num_classes + 1).
        input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
                                 It is used for padding mode. In lod mode, input_length is None.
        padding_value(int): padding value.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        For lod mode, returns the result of CTC greedy decoder, 2-D LoDTensor, shape is [Lp, 1], \
        data type is int64. 'Lp' is the sum of all output sequences' length. If all the sequences \
        in result were empty, the result LoDTensor will be [-1] with empty \
        LoD [[]].

        For padding mode, returns a tuple of (output, output_length), which was described as below:

        output, 2-D Tensor, shape is [batch_size, N], data type is int64.

        output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
                       each sequence of output for padding mode.

    Return type:
        For lod mode: Variable

        For padding mode: tuple of two Variables (output, output_length).

    Examples:
        .. code-block:: python

            # for lod mode
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
            cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)

            # for padding mode
            x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
            x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
            out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
                            input_length=x_pad_len)

    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'ctc_greedy_decoder')

    helper = LayerHelper("ctc_greedy_decoder", **locals())
    # Step 1 of the algorithm: per-step argmax via top-1.
    _, topk_indices = topk(input, k=1)

    # ctc align op
    ctc_out = helper.create_variable_for_type_inference(dtype="int64")
    if input_length is None:
        # lod mode: ctc_align consumes the LoDTensor of top-1 indices directly.
        helper.append_op(
            type="ctc_align",
            inputs={"Input": [topk_indices]},
            outputs={"Output": [ctc_out]},
            attrs={"merge_repeated": True,
                   "blank": blank})
        return ctc_out
    else:
        # padding mode: drop the trailing size-1 axis from top-1 indices and
        # pass the per-sequence lengths through to ctc_align.
        ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
        ctc_input = squeeze(topk_indices, [2])

        helper.append_op(
            type="ctc_align",
            inputs={"Input": [ctc_input],
                    "InputLength": [input_length]},
            outputs={"Output": [ctc_out],
                     "OutputLength": [ctc_out_len]},
            attrs={
                "merge_repeated": True,
                "blank": blank,
                "padding_value": padding_value
            })
        return ctc_out, ctc_out_len


def transpose(x, perm, name=None):
    """
    Permute the data dimensions of `input` according to `perm`.

    The `i`-th dimension of the returned tensor will correspond to the
    perm[i]-th dimension of `input`.

    Args:
        x (Tensor): The input Tensor. It is a N-D Tensor of data types bool, float32, float64, int32.
        perm (list|tuple): Permute the input according to the data of perm.
        name (str): The name of this layer. It is optional.

    Returns:
        Tensor: A transposed n-D Tensor, with data type being bool, float32, float64, int32, int64.

    For Example:

        ..
           code-block:: text

            x = [[[ 1  2  3  4] [ 5  6  7  8] [ 9 10 11 12]]
                 [[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
            shape(x) =  [2,3,4]

            # Example 1
            perm0 = [1,0,2]
            y_perm0 = [[[ 1  2  3  4] [13 14 15 16]]
                       [[ 5  6  7  8] [17 18 19 20]]
                       [[ 9 10 11 12] [21 22 23 24]]]
            shape(y_perm0) = [3,2,4]

            # Example 2
            perm1 = [2,1,0]
            y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
                       [[ 2 14] [ 6 18] [10 22]]
                       [[ 3 15] [ 7 19] [11 23]]
                       [[ 4 16] [ 8 20] [12 24]]]
            shape(y_perm1) = [4,3,2]

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn([2, 3, 4])
            x_transposed = paddle.transpose(x, perm=[1, 0, 2])
            print(x_transposed.shape)
            # [3L, 2L, 4L]

    """
    if in_dygraph_mode():
        out, _ = _C_ops.transpose2(x, 'axis', perm)
        return out

    check_variable_and_dtype(
        x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
        'transpose')
    check_type(perm, 'perm', (list, tuple), 'transpose')
    if isinstance(perm, tuple):
        perm = list(perm)
    if len(perm) != len(x.shape):
        raise ValueError(
            "Input(perm) is the permutation of dimensions of Input(x), "
            "its length should be equal to dimensions of Input(x), "
            "but received dimension of Input(x) is %s, "
            "the length of Input(perm) is %s." % (len(x.shape), len(perm)))
    for idx, dim in enumerate(perm):
        # NOTE(review): only the upper bound is validated here; negative
        # entries in `perm` pass through to the op unchecked — verify whether
        # that is intended.
        if dim >= len(x.shape):
            raise ValueError(
                "Each element in Input(perm) should be less than Input(x)'s dimension, "
                "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
                "dimension %d."
                % (idx, perm[idx], len(x.shape)))

    helper = LayerHelper('transpose', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    # transpose2 also emits XShape (used by the backward pass); it is not
    # returned to the caller.
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='transpose2',
        inputs={'X': [x]},
        outputs={'Out': [out],
                 'XShape': [x_shape]},
        attrs={'axis': perm})
    return out


def im2sequence(input,
                filter_size=1,
                stride=1,
                padding=0,
                input_image_size=None,
                out_stride=1,
                name=None):
    r"""
    :api_attr: Static Graph

    Extracts image patches from the input tensor to form a tensor of shape
    {input.batch_size * output_height * output_width, filter_size_height *
    filter_size_width * input.channels}. This op use filter to scan images
    and convert these images to sequences. After expanding, the number of time step are
    output_height * output_width for an image, in which output_height and
    output_width are calculated by below equation:

    .. math::

        output\_height  = 1 + \
            (padding\_up + padding\_down + input\_height - filter\_size\_height + stride\_height - 1) / stride\_height \\\\
        output\_width = 1 + \
            (padding\_left + padding\_right + input\_width - filter\_size\_width + stride\_width - 1) / stride\_width

    And the dimension of each time step is filter_size_height * filter_size_width * input.channels.

    Parameters:
        input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
        filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
            it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
            Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
        stride(int32 | List[int32]): The stride size. If stride is a List, it must
            contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square
            :math:`[stride\_size, stride\_size]` . Default is 1.
        padding(int32 | List[int32]): The padding size.
            If padding is a List, it can contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]`
            to indicate paddings of four direction.  Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
            padding_up = padding_down = padding_height and
            padding_left = padding_right = padding_width. Otherwise, a scalar padding means
            padding_up = padding_down = padding_left = padding_right = padding.
            Default is 0.
        input_image_size(Variable, optional): the input contains image real size.It's dim
            is :math:`[batchsize, 2]` . It is just for batch inference when not None. Default is None.
        out_stride(int32 | List[int32]): The scaling of image through CNN. It is valid only when input_image_size is not None.
            If out_stride is List, it must contain two integers,
            :math:`[out\_stride\_height, out\_stride\_W]` . Otherwise,
            the out_stride_height = out_stride_width = out_stride. Default is 1.
        name (str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Returns:
            The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
            filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.

    Return Type: Variable

    Examples:

        .. code-block:: text

            Given:

            x = [[[[ 6.  2.  1.]
                   [ 8.  3.  5.]
                   [ 0.  2.  6.]]

                  [[ 2.  4.  4.]
                   [ 6.  3.  0.]
                   [ 6.  4.  7.]]]

                 [[[ 6.  7.  1.]
                   [ 5.  7.  9.]
                   [ 2.  4.  8.]]

                  [[ 1.  2.  1.]
                   [ 1.  3.  5.]
                   [ 9.  0.  8.]]]]

            x.dims = {2, 2, 3, 3}

            And:

            filter = [2, 2]
            stride = [1, 1]
            padding = [0, 0]

            Then:

            output.data = [[ 6.  2.  8.  3.  2.  4.  6.  3.]
                           [ 2.  1.  3.  5.  4.  4.  3.  0.]
                           [ 8.  3.  0.  2.  6.  3.  6.  4.]
                           [ 3.  5.  2.  6.  3.  0.  4.  7.]
                           [ 6.  7.  5.  7.  1.  2.  1.  3.]
                           [ 7.  1.  7.  9.  2.  1.  3.  5.]
                           [ 5.  7.  2.  4.  1.  3.  9.  0.]
                           [ 7.  9.  4.  8.  3.  5.  0.  8.]]

            output.dims = {8, 8}

            output.lod = [[4, 4]]

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            data = fluid.data(name='data', shape=[None, 3, 32, 32],
                              dtype='float32')
            output = fluid.layers.im2sequence(
                input=data, stride=[1, 1], filter_size=[2, 2])

    """
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")
    check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence')

    # Scalars are broadcast to [h, w] pairs.
    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]
    if len(padding) == 2:
        # A [height, width] padding is expanded in place to the 4-element
        # [up, left, down, right] form described in the docstring.
        padding.append(padding[0])
        padding.append(padding[1])
    inputs = {"X": input}
    attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
    if input_image_size:
        if isinstance(out_stride, int):
            out_stride = [out_stride, out_stride]
        inputs["Y"] = input_image_size
        attrs["out_stride"] = out_stride
    helper = LayerHelper('im2sequence', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
    """
    :api_attr: Static Graph

    ${comment}

    Args:
        input (${x_type}): ${x_comment}.
        future_context_size (int): Future context size. Please note, the shape
            of convolution kernel is [future_context_size + 1, D].
        param_attr (ParamAttr): Attributes of parameters, including
            name, initializer etc.
        act (str): Non-linear activation to be applied to output variable.

    Returns:
        ${out_comment}.

    Examples:

      .. code-block:: python

        # for LodTensor inputs
        import paddle
        paddle.enable_static()
        x = paddle.static.data(name='x', shape=[9, 16],
                               dtype='float32', lod_level=1)
        out = paddle.static.nn.row_conv(input=x, future_context_size=2)
        # for Tensor inputs
        x = paddle.static.data(name='x', shape=[9, 4, 16], dtype='float32')
        out = paddle.static.nn.row_conv(input=x, future_context_size=2)
    """
    helper = LayerHelper('row_conv', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
    dtype = helper.input_dtype()
    # Kernel covers the current step plus `future_context_size` future steps,
    # over the feature dimension of the input.
    filter_shape = [future_context_size + 1, input.shape[-1]]
    filter_param = helper.create_parameter(
        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='row_conv',
        inputs={'X': [input],
                'Filter': [filter_param]},
        outputs={'Out': [out]})
    return helper.append_activation(out)


@templatedoc()
def multiplex(inputs, index, name=None):
    """
    Based on the given index parameter, the OP selects a specific row from each input Tensor
    to construct the output Tensor.

    If the input of this OP contains :math:`m` Tensors, where :math:`I_{i}` means the i-th input Tensor, :math:`i` between :math:`[0,m)` .

    And :math:`O` means the output, where :math:`O[i]` means the i-th row of the output, then the output satisfies that :math:`O[i] = I_{index[i]}[i]` .

    For Example:

            .. code-block:: text

                Given:

                inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
                          [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
                          [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
                          [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]

                index = [[3],[0],[1],[2]]

                out = [[3,0,3,4],    # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
                       [0,1,3,4],    # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
                       [1,2,4,2],    # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
                       [2,3,3,4]]    # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]


    Args:
        inputs (list): The input Tensor list.
The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and rank must be at least 2. index (Tensor): Used to select some rows in the input Tensor to construct an index of the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Output of multiplex OP, with data type being float32, float64, int32, int64. Examples: .. code-block:: python import paddle import numpy as np img1 = np.array([[1, 2], [3, 4]]).astype(np.float32) img2 = np.array([[5, 6], [7, 8]]).astype(np.float32) inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)] index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32)) res = paddle.multiplex(inputs, index) print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)] """ if in_dygraph_mode(): return _C_ops.multiplex(index, inputs) helper = LayerHelper('multiplex', **locals()) check_type(inputs, 'inputs', (list), 'multiplex') if len(inputs) < 2: raise ValueError( "inputs should be a list object with at least 2 elements.") for id, x in enumerate(inputs): check_variable_and_dtype(x, 'input[' + str(id) + ']', ['float32', 'float64', 'int32', 'int64'], 'multiplex') check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex') out = helper.create_variable_for_type_inference(inputs[0].dtype) helper.append_op( type='multiplex', inputs={'X': inputs, 'Ids': index}, outputs={'Out': [out]}) return out def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): """ This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`. It takes the first dimension of :attr:`x` and :attr:`y` as batch size. For each instance, it computes the smooth L1 loss element by element first and then sums all the losses. 
So the shape of output Variable is [batch_size, 1]. Args: x (Variable): A tensor with rank at least 2. The input value of smooth L1 loss op with shape [batch_size, dim1, ..., dimN]. A LoDTensor or Tensor with type float32. y (Variable): A tensor with rank at least 2. The target value of smooth L1 loss op with same shape as :attr:`x`. A LoDTensor or Tensor with type float32. inside_weight (Variable|None): A tensor with rank at least 2. This input is optional and should have same shape with :attr:`x`. If provided, the result of (:attr:`x` - :attr:`y`) will be multiplied by this tensor element by element. A Tensor with type float32. outside_weight (Variable|None): A tensor with rank at least 2. This input is optional and should have same shape with :attr:`x`. If provided, the out smooth L1 loss will be multiplied by this tensor element by element. A Tensor with type float32. sigma (float|None): Hyper parameter of smooth L1 loss layer. A float scalar with default value 1.0. Returns: Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32. Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle paddle.enable_static() data = fluid.data(name="x", shape=[-1, 3], dtype="float32") label = fluid.data(name="y", shape=[-1, 3], dtype="float32") result = fluid.layers.smooth_l1(data,label) place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) x = np.random.rand(3,3).astype("float32") y = np.random.rand(3,3).astype("float32") output= exe.run(feed={"x":x, "y":y}, fetch_list=[result]) print(output) #[array([[0.08220536], # [0.36652038], # [0.20541131]], dtype=float32)] """ check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss') check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss') helper = LayerHelper('smooth_l1_loss', **locals()) diff = helper.create_variable_for_type_inference(dtype=x.dtype) loss = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='smooth_l1_loss', inputs={ 'X': x, 'Y': y, 'InsideWeight': inside_weight, 'OutsideWeight': outside_weight }, outputs={'Diff': diff, 'Out': loss}, attrs={'sigma': sigma if sigma is not None else 1.0}) return loss @deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot') def one_hot(input, depth, allow_out_of_range=False): """ **WARING:** This OP requires the last dimension of Tensor shape must be equal to 1. This OP will be deprecated in a future release. It is recommended to use fluid. :ref:`api_fluid_one_hot` . The operator converts each id in the input to an one-hot vector with a :attr:`depth` length. The value in the vector dimension corresponding to the id is 1, and the value in the remaining dimension is 0. The shape of output Tensor or LoDTensor is generated by adding :attr:`depth` dimension behind the last dimension of the input shape. .. 
code-block:: text Example 1 (allow_out_of_range=False): input: X.shape = [4, 1] X.data = [[1], [1], [3], [0]] depth = 4 output: Out.shape = [4, 4] Out.data = [[0., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.]] Example 2 (allow_out_of_range=True): input: X.shape = [4, 1] X.data = [[1], [1], [5], [0]] depth = 4 allow_out_of_range = True output: Out.shape = [4, 4] Out.data = [[0., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 0.], # This id is 5, which goes beyond depth, so set it all-zeros data. [1., 0., 0., 0.]] Example 3 (allow_out_of_range=False): input: X.shape = [4, 1] X.data = [[1], [1], [5], [0]] depth = 4 allow_out_of_range = False output: Throw an exception for Illegal value The second dimension in X is 5, which is greater than depth. Allow_out_of_range =False means that does not allow the word id to exceed depth, so it throws an exception. Args: input(Variable): Tensor or LoDTensor with shape :math:`[N_1, N_2, ..., N_k, 1]` , which contains at least one dimension and the last dimension must be 1. The data type is int32 or int64. depth(scalar): An integer defining the :attr:`depth` of the one hot dimension. If input is word id, depth is generally the dictionary size. allow_out_of_range(bool): A bool value indicating whether the input indices could be out of range :math:`[0, depth)` . When input indices are out of range, exceptions :code:`Illegal value` is raised if :attr:`allow_out_of_range` is False, or zero-filling representations is created if it is set True. Default: False. Returns: Variable: The one-hot representations of input. A Tensor or LoDTensor with type float32. Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() # Correspond to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4]. 
label = fluid.data(name="label", shape=[4, 1], dtype="int64") one_hot_label = fluid.layers.one_hot(input=label, depth=4) """ if in_dygraph_mode(): if isinstance(depth, Variable): depth = depth.numpy() assert depth.shape == ( 1, ), "depth of type Variable should have shape [1]" depth = depth.item(0) out = _C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range', allow_out_of_range) out.stop_gradient = True return out helper = LayerHelper("one_hot", **locals()) check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot') check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot') one_hot_out = helper.create_variable_for_type_inference(dtype='float32') if not isinstance(depth, Variable): # user attribute inputs = {'X': input} attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range} else: depth.stop_gradient = True inputs = {'X': input, 'depth_tensor': depth} attrs = {'allow_out_of_range': allow_out_of_range} helper.append_op( type="one_hot", inputs=inputs, attrs=attrs, outputs={'Out': one_hot_out}) one_hot_out.stop_gradient = True return one_hot_out def autoincreased_step_counter(counter_name=None, begin=1, step=1): """ :api_attr: Static Graph Create an auto-increase variable. which will be automatically increased by 1 in every iteration. By default, the first return of this counter is 1, and the step size is 1. Args: counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'. begin(int, optional): The first return value of this counter. Default 1. step(int, optional): The step size. Default 1. Returns: Variable: The auto-increased Variable with data type int64. Examples: .. 
code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() global_step = fluid.layers.autoincreased_step_counter( counter_name='@LR_DECAY_COUNTER@', begin=0, step=1) """ helper = LayerHelper('global_step_counter') if counter_name is None: counter_name = '@STEP_COUNTER@' counter, is_new_var = helper.create_or_get_global_variable( name=counter_name, dtype='int64', shape=[1], persistable=True, belong_to_optimizer=True) if is_new_var: helper.set_variable_initializer( counter, initializer=Constant( value=begin - 1, force_cpu=True)) helper.main_program.global_block()._prepend_op( type='increment', inputs={'X': [counter]}, outputs={'Out': [counter]}, attrs={'step': float(step)}) counter.stop_gradient = True return counter def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): r""" :alias_main: paddle.reshape :alias: paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape This operator changes the shape of ``x`` without changing its data. The target shape can be given by ``shape`` or ``actual_shape``. When ``shape`` and ``actual_shape`` are set at the same time, ``actual_shape`` has a higher priority than ``shape`` but at this time ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to guarantee shape inference in compile-time. Some tricks exist when specifying the target shape. 1. -1 means the value of this dimension is inferred from the total element number of x and remaining dimensions. Thus one and only one dimension can be set -1. 2. 0 means the actual dimension value is going to be copied from the corresponding dimension of x. The index of 0s in shape can not exceed the dimension of x. Here are some examples to explain it. 1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape is [6, 8], the reshape operator will transform x into a 2-D tensor with shape [6, 8] and leaving x's data unchanged. 2. 
Given a 3-D tensor x with a shape [2, 4, 6], and the target shape specified is [2, 3, -1, 2], the reshape operator will transform x into a 4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this case, one dimension of the target shape is set to -1, the value of this dimension is inferred from the total element number of x and remaining dimensions. 3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case, besides -1, 0 means the actual dimension value is going to be copied from the corresponding dimension of x. **Note**: The parameter ``actual_shape`` will be deprecated in the future and only use ``shape`` instead to represent the target shape. Args: x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32`` or ``int64``. shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1. The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``shape`` is an Tensor, it should be an 1-D Tensor . actual_shape(variable, optional): An 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape according to this given shape rather than ``shape`` specifying shape. That is to say ``actual_shape`` has a higher priority than ``shape(list|tuple)`` but not ``shape(Tensor)``. \ This argument ``actual_shape`` will be removed in a future version. \ Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``. act (str, optional): The non-linear activation to be applied to the reshaped input. Default None. inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape`` are the same variable. Otherwise, the input and output of ``layers.reshape`` are different variable. Default False. 
Note that if ``x`` is more than one OPs' input, ``inplace`` must be False. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Tensor: A reshaped Tensor with the same data type as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable. Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() # example 1: # attr shape is a list which doesn't contain Tensors. data_1 = fluid.data( name='data_1', shape=[2, 4, 6], dtype='float32') reshaped_1 = fluid.layers.reshape( x=data_1, shape=[-1, 0, 3, 2]) # the shape of reshaped_1 is [2,4,3,2]. # example 2: # attr shape is a list which contains Tensors. data_2 = fluid.layers.fill_constant([2,25], "int32", 3) dim = fluid.layers.fill_constant([1], "int32", 5) reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10]) # the shape of reshaped_2 is [5,10]. # example 3: data_3 = fluid.data( name="data_3", shape=[2,4,6], dtype='float32') reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8]) # the shape of reshaped_3 is [6,8]. """ if in_dygraph_mode(): #TODO(zhiqiu): enable inplace in dygraph mode. if inplace: warnings.warn( "Inplace on reshape is not allowed and will be discarded in dygraph mode currently." 
) if isinstance(shape, (list, tuple)): shape = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in shape ] out, _ = _C_ops.reshape2(x, None, 'shape', shape) elif isinstance(shape, Variable): shape.stop_gradient = True out, _ = _C_ops.reshape2(x, shape) else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," " got '{}.'".format(type(shape))) return dygraph_utils._append_activation_in_dygraph(out, act) check_variable_and_dtype(x, 'x', [ 'float16', 'float32', 'float64', 'int32', 'int64', 'bool', 'uint16' ], 'reshape') check_type(shape, 'shape', (list, tuple, Variable), 'reshape') check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape') helper = LayerHelper("reshape2", **locals()) def get_attr_shape(list_shape): unk_dim_idx = -1 attrs_shape = [] for dim_idx, dim_size in enumerate(list_shape): if isinstance(dim_size, Variable): attrs_shape.append(-1) else: attrs_shape.append(dim_size) if dim_size == -1: assert unk_dim_idx == -1, ( "Only one dimension value of 'shape' in reshape can " "be -1. But received shape[%d] is also -1." % dim_idx) unk_dim_idx = dim_idx elif dim_size == 0: assert dim_idx < len(x.shape), ( "The index of 0 in `shape` must be less than " "the input tensor X's dimensions. " "But received shape[%d] = 0, X's dimensions = %d." % (dim_idx, len(x.shape))) else: assert dim_size > 0, ( "Each dimension value of 'shape' in reshape must not " "be negative except one unknown dimension. " "But received shape[%d] = %s." % (dim_idx, str(dim_size))) return attrs_shape inputs = {"X": x} attrs = {} if isinstance(shape, Variable): shape.stop_gradient = True inputs["Shape"] = shape elif isinstance(shape, (list, tuple)): assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, " "but received %s." 
% len(shape)) attrs["shape"] = get_attr_shape(shape) if utils._contain_var(shape): inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape) elif isinstance(actual_shape, Variable): actual_shape.stop_gradient = True inputs["Shape"] = actual_shape out = x if inplace else helper.create_variable_for_type_inference( dtype=x.dtype) x_shape = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="reshape2", inputs=inputs, attrs=attrs, outputs={"Out": out, "XShape": x_shape}) return helper.append_activation(out) def squeeze(input, axes, name=None): """ This OP will squeeze single-dimensional entries of input tensor's shape. If axes is provided, will remove the dims by axes, the dims selected by axes should be one. If not provide axes, all dims equal to one will be deleted. .. code-block:: text Case1: Input: X.shape = (1, 3, 1, 5) axes = [0] Output: Out.shape = (3, 1, 5) Case2: Input: X.shape = (1, 3, 1, 5) axes = [] Output: Out.shape = (3, 5) Case3: Input: X.shape = [1,3,1,5] axes = [-2] Output: Out.shape = [1,3,5] Args: input (Variable): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64. axes (list): One integer or List of integers, indicating the dimensions to be squeezed. Axes range is :math:`[-rank(input), rank(input))`. If axes is negative, :math:`axes=axes+rank(input)`. name (str, optional): Please refer to :ref:`api_guide_Name`, Default None. Returns: Variable: Output squeezed Tensor. Data type is same as input Tensor. Examples: .. 
code-block:: python import paddle.fluid as fluid import paddle.fluid.layers as layers # set batch size=None x = fluid.data(name='x', shape=[None, 5, 1, 10]) y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10] """ if in_dygraph_mode(): out, _ = _C_ops.squeeze2(input, 'axes', axes) return out helper = LayerHelper("squeeze", **locals()) check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'], 'squeeze') check_type(axes, 'axis/axes', (list, tuple), 'squeeze') out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type="squeeze2", inputs={"X": input}, attrs={"axes": axes}, outputs={"Out": out, "XShape": x_shape}) return out def unsqueeze(input, axes, name=None): """ Insert single-dimensional entries to the shape of a Tensor. Takes one required argument axes, a list of dimensions that will be inserted. Dimension indices in axes are as seen in the output tensor. For example: .. code-block:: text Given a tensor such that tensor with shape [3, 4, 5], then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1]. Args: input (Variable): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64. axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is an Variable, it should be an 1-D Tensor . name (str|None): Name for this layer. Returns: Variable: Unsqueezed Tensor, with the same data type as input. Examples: .. 
code-block:: python import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[5, 10]) y = fluid.layers.unsqueeze(input=x, axes=[1]) """ if in_dygraph_mode(): if isinstance(axes, int): axes = [axes] elif isinstance(axes, Variable): axes = axes.numpy().tolist() elif isinstance(axes, (list, tuple)): axes = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in axes ] out, _ = _C_ops.unsqueeze2(input, 'axes', axes) return out check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'], 'unsqueeze') helper = LayerHelper("unsqueeze2", **locals()) inputs = {"X": input} attrs = {} if isinstance(axes, int): axes = [axes] if isinstance(axes, Variable): axes.stop_gradient = True inputs["AxesTensor"] = axes elif isinstance(axes, (list, tuple)): if utils._contain_var(axes): inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes) else: attrs["axes"] = axes out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type="unsqueeze2", inputs=inputs, attrs=attrs, outputs={"Out": out, "XShape": x_shape}) return out def lod_reset(x, y=None, target_lod=None): """ Set LoD of :attr:`x` to a new one specified by :attr:`y` or :attr:`target_lod`. When :attr:`y` provided, :attr:`y.lod` would be considered as target LoD first, otherwise :attr:`y.data` would be considered as target LoD. If :attr:`y` is not provided, target LoD should be specified by :attr:`target_lod`. If target LoD is specified by :attr:`y.data` or :attr:`target_lod`, only one level LoD is supported. .. 
code-block:: text * Example 1: Given a 1-level LoDTensor x: x.lod = [[ 2, 3, 1 ]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] target_lod: [4, 2] then we get a 1-level LoDTensor: out.lod = [[4, 2]] out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] out.dims = [6, 1] * Example 2: Given a 1-level LoDTensor x: x.lod = [[2, 3, 1]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] y is a Tensor: y.data = [[2, 4]] y.dims = [1, 3] then we get a 1-level LoDTensor: out.lod = [[2, 4]] out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] out.dims = [6, 1] * Example 3: Given a 1-level LoDTensor x: x.lod = [[2, 3, 1]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] y is a 2-level LoDTensor: y.lod = [[2, 2], [2, 2, 1, 1]] y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]] y.dims = [6, 1] then we get a 2-level LoDTensor: out.lod = [[2, 2], [2, 2, 1, 1]] out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] out.dims = [6, 1] Args: x (Variable): Input variable which could be a Tensor or LoDTensor. The data type should be int32, int64, float32 or float64. y (Variable, optional): If provided, output's LoD would be derived from :attr:`y`. If y's lod level>0, the data type can be any type. If y's lod level=0, the data type should be int32. target_lod (list|tuple, optional): One level LoD which should be considered as target LoD when :attr:`y` not provided. Returns: Variable: Output variable with LoD specified by this layer. Raises: ValueError: If :attr:`y` and :attr:`target_lod` are both None. Examples: .. 
code-block:: python import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[10]) y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2) out = fluid.layers.lod_reset(x=x, y=y) """ check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_reset') helper = LayerHelper("lod_reset", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) if y is not None: check_type(y, 'y', (Variable), 'lod_reset') #TODO: check y.lod_level = 0 dtype helper.append_op( type="lod_reset", inputs={'X': x, 'Y': y}, outputs={'Out': out}) elif target_lod is not None: helper.append_op( type="lod_reset", inputs={'X': x}, attrs={'target_lod': target_lod}, outputs={'Out': out}) else: raise ValueError("y and target_lod should not be both none.") return out def lod_append(x, level): """ Append level to LoD of :attr:`x`. .. code-block:: text * Example 1: given a 1-level LoDTensor x: x.lod = [[ 2, 3, 1 ]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] level: [1, 1, 1, 1, 1, 1, 1] then we get a 2-level LoDTensor: x.lod = [[ 2, 3, 1 ], [1, 1, 1, 1, 1, 1]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] Args: x (Variable): Input variable which could be a tensor or LoDTensor. The data type should be int32, int64, float32 or float64. level (list|tuple|Variable, optional): The LoD level to be appended into LoD of x. If level is variable and its lod level>0, the data type can be any type. If level is variable and its lod level=0, the data type should be int32. Returns: Variable: Output variable with new LoD level. Raises: ValueError: If :attr:`y` is None or and :attr:`level` is not Iterator. Examples: .. 
code-block:: python import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1) out = fluid.layers.lod_append(x, [1,1,1,1,1,1]) """ from collections import Iterable if x is None: raise ValueError("Input(x) can't be None.") if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)): raise ValueError("Input(level) must be list, tuple or Variable.") check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_append') helper = LayerHelper("lod_append", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) inputs = {'X': x} attrs = {'append': True} if isinstance(level, Variable): inputs['Y'] = level #TODO: check y.lod_level = 0 dtype else: attrs['target_lod'] = level helper.append_op( type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out}) return out def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, data_format='NCHW'): r""" :alias_main: paddle.nn.functional.lrn :alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn :old_api: paddle.fluid.layers.lrn This operator implements the Local Response Normalization Layer. This layer performs a type of "lateral inhibition" by normalizing over local input regions. For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_ The formula is as follows: .. math:: Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta} In the above equation: - :math:`n` : The number of channels to sum over. - :math:`k` : The offset (avoid being divided by 0). - :math:`\\alpha` : The scaling parameter. - :math:`\\beta` : The exponent parameter. 
Args: input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W] or [N, H, W, C], where N is the batch size, C is the input channel, H is Height, W is weight. The data type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError. n (int, optional): The number of channels to sum over. Default: 5 k (float, optional): An offset, positive. Default: 1.0 alpha (float, optional): The scaling parameter, positive. Default:1e-4 beta (float, optional): The exponent, positive. Default:0.75 name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. Returns: Variable: A tensor variable storing the transformation result with the same shape and data type as input. Examples: .. code-block:: python import paddle.fluid as fluid data = fluid.data( name="data", shape=[None, 3, 112, 112], dtype="float32") lrn = fluid.layers.lrn(input=data) print(lrn.shape) # [-1, 3, 112, 112] print(lrn.dtype) # float32 """ helper = LayerHelper('lrn', **locals()) check_variable_and_dtype(input, 'input', ['float32'], 'lrn') dtype = helper.input_dtype() input_shape = input.shape dims = len(input_shape) if dims != 4: raise ValueError( "Input's dimension size of Op(lrn) must be 4, but received %d." 
% (dims)) if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) of Op(lrn) got wrong value: received " + data_format + " but only NCHW or NHWC supported.") mid_out = helper.create_variable_for_type_inference( dtype=dtype, stop_gradient=True) lrn_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="lrn", inputs={"X": input}, outputs={ "Out": lrn_out, "MidOut": mid_out, }, attrs={ "n": n, "k": k, "alpha": alpha, "beta": beta, "data_format": data_format }) return lrn_out def pad(x, paddings, pad_value=0., name=None): r""" :alias_main: paddle.nn.functional.pad :alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad :old_api: paddle.fluid.layers.pad This op will pad a tensor with a constant value given by :attr:`pad_value`, and the padded shape is specified by :attr:`paddings`. Specifically, the number of values padded before the elements of :attr:`x` in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the number of values padded after the elements of :attr:`x` in dimension :attr:`i` is indicated by :attr:`paddings[2*i+1]`. See below for an example. .. code-block:: text Given: x = [[1, 2], [3, 4]] paddings = [0, 1, 1, 2] pad_value = 0 Return: out = [[0, 1, 2, 0, 0] [0, 3, 4, 0, 0] [0, 0, 0, 0, 0]] Args: x (Variable): Tensor, data type is float32. paddings (list): A list of integers. Its elements specify the padded width before and after each dimension in turn. The length of :attr:`paddings` must be equal to :math:`rank(x) \\times 2`. pad_value (float): The constant value used to pad. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: The padded tensor, with the same data type and rank as :attr:`x` Return Type: Variable Examples: .. 
code-block:: python # x is a rank 2 tensor variable import paddle.fluid as fluid x = fluid.data(name='data', shape=[300, 300], dtype='float32') out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.) """ check_variable_and_dtype(x, 'x', [ 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', 'complex128' ], "pad") helper = LayerHelper('pad', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad', inputs={'X': x}, outputs={'Out': out}, attrs={'paddings': paddings, 'pad_value': float(pad_value)}) return out def pad_constant_like(x, y, pad_value=0., name=None): r""" Pad :attr:`y` with :attr:`pad_value`, the number of values padded to the edges of each axis is specified by the difference of the shape of :attr:`x` and :attr:`y` . ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n)) specify padding widths for each axis. The input should be a k-D tensor(k > 0 and k < 7). See below for an example. .. code-block:: text Given: X = [[[[ 0, 1, 2], [ 3, 4, 5]], [[ 6, 7, 8], [ 9, 10, 11]], [[12, 13, 14], [15, 16, 17]]], [[[18, 19, 20], [21, 22, 23]], [[24, 25, 26], [27, 28, 29]], [[30, 31, 32], [33, 34, 35]]]] X.shape = (2, 3, 2, 3) Y = [[[[35, 36, 37]], [[38, 39, 40]], [[41, 42, 43]]]] Y.shape = (1, 3, 1, 3) And pad_value = 0. Return: Out = [[[[35, 36, 37], [ 0, 0, 0]], [[38, 39, 40], [ 0, 0, 0]], [[41, 42, 43], [ 0, 0, 0]]], [[[ 0, 0, 0], [ 0, 0, 0]], [[ 0, 0, 0], [ 0, 0, 0]], [[ 0, 0, 0], [ 0, 0, 0]]]] Out.shape = [2, 3, 2, 3] Args: x (Variable): Tensor, its shape specifies the shape of output. y (Variable): Tensor, its rank is the same with :attr:`x`, and for each dimension :math:`i` , :math:`y\_shape[i] <= x\_shape[i]` . The data type can be float32 or float64. pad_value (float): The constant value used to pad. name(str, optional): The default value is None. Normally there is no need for user to set this property. 
For more information, please refer to :ref:`api_guide_Name` Returns: The padded tensor, with the same shape as :attr:`x` and the same data type as :attr:`y` Return Type: Variable Examples: .. code-block:: python # x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3) # y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3) import paddle.fluid as fluid x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32') y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32') out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.) # out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3] """ check_type(x, 'x', (Variable), 'pad_constant_like') check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], "pad_constant_like") helper = LayerHelper('pad_constant_like', **locals()) dtype = helper.input_dtype(input_param_name='y') out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad_constant_like', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'pad_value': float(pad_value)}) return out def label_smooth(label, prior_dist=None, epsilon=0.1, dtype="float32", name=None): r""" :alias_main: paddle.nn.functional.label_smooth :alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth :old_api: paddle.fluid.layers.label_smooth Label smoothing is a mechanism to regularize the classifier layer and is called label-smoothing regularization (LSR). Label smoothing is proposed to encourage the model to be less confident, since optimizing the log-likelihood of the correct label directly may cause overfitting and reduce the ability of the model to adapt. Label smoothing replaces the ground-truth label :math:`y` with the weighted sum of itself and some fixed distribution :math:`\mu`. For class :math:`k`, i.e. .. math:: \\tilde{y_k} = (1 - \epsilon) * y_k + \epsilon * \mu_k, where :math:`1 - \epsilon` and :math:`\epsilon` are the weights respectively, and :math:`\\tilde{y}_k` is the smoothed label. 
Usually uniform distribution is used for :math:`\mu`. See more details about label smoothing in https://arxiv.org/abs/1512.00567. Parameters: label(Variable): The input variable containing the label data. The label data should use one-hot representation. It's a multidimensional tensor with a shape of :math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" and "float64". prior_dist(Variable, optional): The prior distribution to be used to smooth labels. If not provided, an uniform distribution is used. It's a multidimensional tensor with a shape of :math:`[1, class\_num]` . The default value is None. epsilon(float, optional): The weight used to mix up the original ground-truth distribution and the fixed distribution. The default value is 0.1. dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set as 'float32', 'float64'. The default value is 'float32'. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Variable: The tensor variable containing the smoothed labels. Examples: .. code-block:: python import paddle.fluid as fluid import paddle.fluid.layers as layers label = layers.data(name="label", shape=[1], dtype="int32") one_hot_label = layers.one_hot(input=label, depth=10) smooth_label = layers.label_smooth( label=one_hot_label, epsilon=0.1, dtype="float32") """ if epsilon > 1. 
or epsilon < 0.: raise ValueError("The value of epsilon must be between 0 and 1.") if in_dygraph_mode(): return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon)) check_variable_and_dtype(label, 'label', ['float32', 'float64'], 'label_smooth') helper = LayerHelper("label_smooth", **locals()) label.stop_gradient = True smooth_label = helper.create_variable_for_type_inference(dtype) helper.append_op( type="label_smooth", inputs={"X": label, "PriorDist": prior_dist} if prior_dist else {"X": label}, outputs={"Out": smooth_label}, attrs={"epsilon": float(epsilon)}) return smooth_label @templatedoc() def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, rois_num=None, name=None): """ This operator implements the roi_pooling layer. Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7). The operator has three steps: 1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height; 2. Finding the largest value in each section; 3. Copying these max values to the output buffer. For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn Args: input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is Height, W is weight. The data type is float32 or float64. rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates. pooled_height (int, optional): The pooled output height, data type is int32. Default: 1 pooled_width (int, optional): The pooled output height, data type is int32. 
Default: 1 spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0 rois_num (Tensor): The number of RoIs in each image. Default: None name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: Variable: The pooled feature, 4D-Tensor with the shape of [num_rois, C, pooled_height, pooled_width]. Examples: .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle paddle.enable_static() DATATYPE='float32' place = fluid.CPUPlace() #place = fluid.CUDAPlace(0) input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE) roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place) rois_num_data = np.array([2]).astype('int32') x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE) rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE) rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32') pool_out = fluid.layers.roi_pool( input=x, rois=rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, rois_num=rois_num) exe = fluid.Executor(place) out, = exe.run(feed={'input':input_data ,'roi':roi_data, 'rois_num': rois_num_data}, fetch_list=[pool_out.name]) print(out) #array([[[[11.]]], [[[16.]]]], dtype=float32) print(np.array(out).shape) # (2, 1, 1, 1) """ if in_dygraph_mode(): assert rois_num is not None, "rois_num should not be None in dygraph mode." 
pool_out, argmaxes = _C_ops.roi_pool( input, rois, rois_num, "pooled_height", pooled_height, "pooled_width", pooled_width, "spatial_scale", spatial_scale) return pool_out, argmaxes check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool') check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool') helper = LayerHelper('roi_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) argmaxes = helper.create_variable_for_type_inference(dtype='int32') inputs = { "X": input, "ROIs": rois, } if rois_num is not None: inputs['RoisNum'] = rois_num helper.append_op( type="roi_pool", inputs=inputs, outputs={"Out": pool_out, "Argmax": argmaxes}, attrs={ "pooled_height": pooled_height, "pooled_width": pooled_width, "spatial_scale": spatial_scale }) return pool_out @templatedoc() def roi_align(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, sampling_ratio=-1, rois_num=None, name=None): """ ${comment} Args: input (Variable): ${x_comment} rois (Variable): ROIs (Regions of Interest) to pool over.It should be a 2-D LoDTensor of shape (num_rois, 4), the lod level is 1. The data type is float32 or float64. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates. pooled_height (int32, optional): ${pooled_height_comment} Default: 1 pooled_width (int32, optional): ${pooled_width_comment} Default: 1 spatial_scale (float32, optional): ${spatial_scale_comment} Default: 1.0 sampling_ratio(int32, optional): ${sampling_ratio_comment} Default: -1 rois_num (Tensor): The number of RoIs in each image. Default: None name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: Variable: Output: ${out_comment}. Examples: .. 
code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() x = fluid.data( name='data', shape=[None, 256, 32, 32], dtype='float32') rois = fluid.data( name='rois', shape=[None, 4], dtype='float32') rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32') align_out = fluid.layers.roi_align(input=x, rois=rois, pooled_height=7, pooled_width=7, spatial_scale=0.5, sampling_ratio=-1, rois_num=rois_num) """ if in_dygraph_mode(): assert rois_num is not None, "rois_num should not be None in dygraph mode." align_out = _C_ops.roi_align( input, rois, rois_num, "pooled_height", pooled_height, "pooled_width", pooled_width, "spatial_scale", spatial_scale, "sampling_ratio", sampling_ratio) return align_out check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'roi_align') check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align') helper = LayerHelper('roi_align', **locals()) dtype = helper.input_dtype() align_out = helper.create_variable_for_type_inference(dtype) inputs = { "X": input, "ROIs": rois, } if rois_num is not None: inputs['RoisNum'] = rois_num helper.append_op( type="roi_align", inputs=inputs, outputs={"Out": align_out}, attrs={ "pooled_height": pooled_height, "pooled_width": pooled_width, "spatial_scale": spatial_scale, "sampling_ratio": sampling_ratio }) return align_out def dice_loss(input, label, epsilon=0.00001, name=None): r""" Dice loss for comparing the similarity between the input predictions and the label. This implementation is for binary classification, where the input is sigmoid predictions of each pixel, usually used for segmentation task. The dice loss can be defined as the following equation: .. 
math:: dice\_loss &= 1 - \frac{2 * intersection\_area}{total\_area} \\ &= \frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\ &= \frac{(union\_area - intersection\_area)}{total\_area} Parameters: input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_k, D]`, where :math:`N_1` is the batch_size, :math:`D` is the number of categories. It is usually the output predictions of sigmoid activation. The data type can be float32 or float64. label (Tensor): Tensor, the groud truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_k, 1]`. where :math:`N_1` is the batch_size. The data type can be int32 or int64. epsilon (float): The epsilon will be added to the numerator and denominator. If both input and label are empty, it makes sure dice is 1. Default: 0.00001 name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor, which shape is [1], data type is the same as `input` . Example: .. code-block:: python import paddle import paddle.nn.functional as F x = paddle.randn((3,224,224,2)) label = paddle.randint(high=2, shape=(3,224,224,1)) predictions = F.softmax(x) loss = F.dice_loss(input=predictions, label=label) """ assert input.dtype in (paddle.float32, paddle.float64) assert label.dtype in (paddle.int32, paddle.int64) assert len(input.shape) >= 2, \ "The rank of input should be greater than or equal to 2." assert len(input.shape) == len(label.shape), ( "The rank of input and label should be equal, " "but received input: %d, label: %d." % (len(input.shape), len(label.shape))) assert label.shape[-1] == 1, ("The last dimension of label should be 1, " "but received %d." % label.shape[-1]) assert input.shape[:-1] == label.shape[:-1], ( "All dimensions should be equal except the last one.") assert input.numel() > 0 and label.numel() > 0, \ "Any dimension of input and label cannot be equal to 0." 
label = squeeze(label, [-1]) label = paddle.nn.functional.one_hot(label, input.shape[-1]) reduce_dim = list(range(1, len(input.shape))) inse = reduce_sum(input * label, dim=reduce_dim) dice_denominator = reduce_sum( input, dim=reduce_dim) + reduce_sum( label, dim=reduce_dim) dice_score = 1 - inse * 2 / (dice_denominator + epsilon) return reduce_mean(dice_score) def image_resize(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW'): """ This op resizes a batch of images. The input must be a 3-D Tensor of the shape (num_batches, channels, in_w) or a 4-D Tensor of the shape (num_batches, channels, in_h, in_w) or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels), and the resizing only applies on the three dimensions(depth, height and width). **Warning:** the parameter :attr:`actual_shape` will be deprecated in the future and only use :attr:`out_shape` instead. Supporting resample methods: 'LINEAR' : Linear interpolation 'BILINEAR' : Bilinear interpolation 'TRILINEAR' : Trilinear interpolation 'NEAREST' : Nearest neighbor interpolation 'BICUBIC' : Bicubic interpolation Linear interpolation is the method of using a line connecting two known quantities to determine the value of an unknown quantity between the two known quantities. Nearest neighbor interpolation is to perform nearest neighbor interpolation in both the 3rd dimension(in height direction) and the 4th dimension(in width direction) on input tensor. Bilinear interpolation is an extension of linear interpolation for interpolating functions of two variables (e.g. H-direction and W-direction in this op) on a rectilinear 2D grid. The key idea is to perform linear interpolation first in one direction, and then again in the other direction. 
Trilinear interpolation is an extension of linear interpolation for interpolating functions of three variables (e.g. D-direction, H-direction and W-direction in this op) on a rectilinear 3D grid. The linear interpolation is performed on three directions. Bicubic interpolation is an extension of cubic interpolation for interpolating data points on a two-dimensional regular grid. The interpolated surface is smoother than corresponding surfaces obtained by bilinear interpolation or nearest-neighbor interpolation. Align_corners and align_mode are optional parameters,the calculation method of interpolation can be selected by them. Example: .. code-block:: text For scale: if align_corners = True && out_size > 1 : scale_factor = (in_size-1.0)/(out_size-1.0) else: scale_factor = float(in_size/out_size) Nearest neighbor interpolation: if: align_corners = False input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = floor (H_{in} * scale_{factor}) W_out = floor (W_{in} * scale_{factor}) else: align_corners = True input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = round(H_{in} * scale_{factor}) W_out = round(W_{in} * scale_{factor}) linear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,W_in) output: (N,C,W_out) where: W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,W_in) output: (N,C,H_out,W_out) where: W_out = W_{in} * scale_{factor} Bilinear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = (H_{in}+0.5) * scale_{factor} - 0.5 W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = H_{in} * scale_{factor} W_out = W_{in} * scale_{factor} Trilinear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,D_in,H_in,W_in) output: (N,C,D_out,H_out,W_out) where: D_out = (D_{in}+0.5) * scale_{factor} - 0.5 H_out = (H_{in}+0.5) * scale_{factor} - 0.5 W_out = (W_{in}+0.5) * 
scale_{factor} - 0.5 else: input : (N,C,D_in,H_in,W_in) output: (N,C,D_out,H_out,W_out) where: D_out = D_{in} * scale_{factor} Trilinear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,D_in,H_in,W_in) output: (N,C,D_out,H_out,W_out) where: D_out = (D_{in}+0.5) * scale_{factor} - 0.5 H_out = (H_{in}+0.5) * scale_{factor} - 0.5 W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,D_in,H_in,W_in) output: (N,C,D_out,H_out,W_out) where: D_out = D_{in} * scale_{factor} H_out = H_{in} * scale_{factor} W_out = W_{in} * scale_{factor} For details of linear interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Linear_interpolation. For details of nearest neighbor interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation. For details of bilinear interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Bilinear_interpolation. For details of trilinear interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Trilinear_interpolation. For details of bicubic interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Bicubic_interpolation Parameters: input (Variable): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8, its data format is specified by :attr:`data_format`. out_shape (list|tuple|Variable|None): Output shape of image resize layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1]. If a Tensor Variable, its dimensions size should be a 1. scale(float|Variable|None): The multiplier for the input height or width. At least one of :attr:`out_shape` or :attr:`scale` must be set. And :attr:`out_shape` has a higher priority than :attr:`scale`. Default: None. name(str|None): A name for this layer(optional). 
If set None, the layer will be named automatically. resample(str): The resample method. It supports 'LINEAR', 'BICUBIC', 'BILINEAR', 'TRILINEAR' and 'NEAREST' currently. Default: 'BILINEAR' actual_shape(Variable): An optional input to specify output shape dynamically. If provided, image resize according to this given shape rather than :attr:`out_shape` and :attr:`scale` specifying shape. That is to say actual_shape has the highest priority. It is recommended to use :attr:`out_shape` if you want to specify output shape dynamically, because :attr:`actual_shape` will be deprecated. When using actual_shape to specify output shape, one of :attr:`out_shape` and :attr:`scale` should also be set, otherwise errors would be occurred in graph constructing stage. Default: None align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Default: True align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the fomula in the the example code above, it can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 , can be \'1\' for src_idx = scale*dst_index. data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`, `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`. 
Returns: A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels), A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels), or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels). Raises: TypeError: out_shape should be a list or tuple or Variable. TypeError: actual_shape should either be Variable or None. ValueError: The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR', 'BICUBIC' or 'NEAREST' currently. ValueError: 'LINEAR' only support 3-D tensor. ValueError: 'BICUBIC', 'BILINEAR' and 'NEAREST' only support 4-D tensor. ValueError: 'TRILINEAR' only support 5-D tensor. ValueError: One of out_shape and scale must not be None. ValueError: out_shape length should be 1 for input 3-D tensor. ValueError: out_shape length should be 2 for input 4-D tensor. ValueError: out_shape length should be 3 for input 5-D tensor. ValueError: scale should be greater than zero. TypeError: align_corners should be a bool value ValueError: align_mode can only be '0' or '1' ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'. Examples: .. 
code-block:: python #declarative mode import paddle import paddle.fluid as fluid import numpy as np paddle.enable_static() input = fluid.data(name="input", shape=[None,3,6,10]) #1 output = fluid.layers.image_resize(input=input,out_shape=[12,12]) #2 #x = np.array([2]).astype("int32") #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") #fluid.layers.assign(input=x, output=dim1) #output = fluid.layers.image_resize(input=input,out_shape=[12,dim1]) #3 #x = np.array([3,12]).astype("int32") #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") #fluid.layers.assign(input=x, output=shape_tensor) #output = fluid.layers.image_resize(input=input,out_shape=shape_tensor) #4 #x = np.array([0.5]).astype("float32") #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") #fluid.layers.assign(x,scale_tensor) #output = fluid.layers.image_resize(input=input,scale=scale_tensor) place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) input_data = np.random.rand(2,3,6,10).astype("float32") output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) print(output_data[0].shape) #1 # (2, 3, 12, 12) #2 # (2, 3, 12, 2) #3 # (2, 3, 3, 12) #4 # (2, 3, 3, 5) #imperative mode import paddle.fluid.dygraph as dg with dg.guard(place) as g: input = dg.to_variable(input_data) output = fluid.layers.image_resize(input=input, out_shape=[12,12]) print(output.shape) # [2L, 3L, 12L, 12L] """ resample_methods = { 'LINEAR': 'linear', 'BILINEAR': 'bilinear', 'TRILINEAR': 'trilinear', 'NEAREST': 'nearest', 'LINEAR': 'linear', } resample = resample.upper() if resample not in resample_methods: raise ValueError( "The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR' " "or 'NEAREST' currently.") resample_type = resample_methods[resample] if resample == 'LINEAR' and len(input.shape) != 3: raise ValueError("'LINER only support 3-D tensor.") elif resample in 
['BILINEAR', 'NEAREST'] and len(input.shape) != 4: raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.") elif resample == 'TRILINEAR' and len(input.shape) != 5: raise ValueError("'TRILINEAR'only support 5-D tensor.") if not isinstance(align_corners, bool): raise TypeError("Attr align_corners should be a bool value") if align_mode != 0 and align_mode != 1: raise ValueError("align_mode can only be 0 or 1") if out_shape is None and scale is None: raise ValueError("One of out_shape and scale must not be None.") helper = LayerHelper('{}_interp'.format(resample_type), **locals()) dtype = helper.input_dtype() if len(input.shape) == 3 and data_format not in ['NCW', 'NWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCW` or `NWC` supported for 3-D input.") elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCHW` or `NHWC` supported for 4-D input.") elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCDHW` or `NDHWC` supported for 5-D input.") def _is_list_or_turple_(data): return (isinstance(data, list) or isinstance(data, tuple)) if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW': data_layout = 'NCHW' if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC': data_layout = 'NHWC' inputs = {"X": input} attrs = { "out_d": -1, "out_h": -1, "out_w": -1, "interp_method": resample_type, "align_corners": align_corners, "align_mode": align_mode, "data_layout": data_layout } if out_shape is not None: if isinstance(out_shape, Variable): out_shape.stop_gradient = True inputs['OutSize'] = out_shape else: if not (_is_list_or_turple_(out_shape)): raise TypeError( "out_shape should be a list or tuple or Variable.") # Validate the shape contain_var 
= False for dim_idx, dim_size in enumerate(out_shape): if isinstance(dim_size, Variable): contain_var = True continue assert dim_size > 0, ( "Each dimension size given in out_shape must be greater than 0." ) if contain_var: new_size_tensor = [] size_list = [] for dim in out_shape: if isinstance(dim, Variable): dim.stop_gradient = True new_size_tensor.append(dim) size_list.append(-1) else: assert (isinstance(dim, int)) temp_out = helper.create_variable_for_type_inference( 'int32') fill_constant( [1], 'int32', dim, force_cpu=True, out=temp_out) new_size_tensor.append(temp_out) size_list.append(dim) inputs['SizeTensor'] = new_size_tensor if len(input.shape) == 3: if len(out_shape) != 1: raise ValueError("out_shape length should be 1 for " "input 3-D tensor.") if contain_var: attrs['out_w'] = size_list[0] else: out_shape = list(map(int, out_shape)) attrs['out_w'] = out_shape[0] elif len(input.shape) == 4: if len(out_shape) != 2: raise ValueError("out_shape length should be 2 for " "input 4-D tensor.") if contain_var: attrs['out_h'] = size_list[0] attrs['out_w'] = size_list[1] else: out_shape = list(map(int, out_shape)) attrs['out_h'] = out_shape[0] attrs['out_w'] = out_shape[1] if len(input.shape) == 5: if len(out_shape) != 3: raise ValueError("out_shape length should be 3 for " "input 5-D tensor.") if contain_var: attrs['out_d'] = size_list[0] attrs['out_h'] = size_list[1] attrs['out_w'] = size_list[2] else: out_shape = list(map(int, out_shape)) attrs['out_d'] = out_shape[0] attrs['out_h'] = out_shape[1] attrs['out_w'] = out_shape[2] else: if isinstance(scale, Variable): scale.stop_gradient = True inputs["Scale"] = scale elif isinstance(scale, float) or isinstance(scale, int): if scale <= 0: raise ValueError("Attr(scale) should be greater than zero.") attrs['scale'] = float(scale) else: raise TypeError( "Attr(scale)'s type should be float, int or Variable.") if isinstance(actual_shape, Variable): warnings.warn( "actual_shape will be deprecated, it is recommended to 
use " "out_shape instead of actual_shape to specify output shape dynamically." ) actual_shape.stop_gradient = True inputs["OutSize"] = actual_shape elif actual_shape is not None: raise TypeError("actual_shape should either be Variable or None.") out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='{}_interp'.format(resample_type), inputs=inputs, outputs={"Out": out}, attrs=attrs) return out @templatedoc(op_type="linear_interp") def resize_linear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCW'): """ This op resizes the input by performing linear interpolation based on given output shape which specified by actual_shape, out_shape and scale in priority order. **Warning:** the parameter :attr:`actual_shape` will be deprecated in the future and only use :attr:`out_shape` instead. Align_corners and align_mode are optional parameters,the calculation method of interpolation can be selected by them. Example: .. code-block:: text For scale: if align_corners = True && out_size > 1 : scale_factor = (in_size-1.0)/(out_size-1.0) else: scale_factor = float(in_size/out_size) Linear interpolation: if: align_corners = False , align_mode = 0 input : (N,C,W_in) output: (N,C,W_out) where: W_out = (W_{in}+0.5) * scale_{factor} - 0.5 else: input : (N,C,W_in) output: (N,C,W_out) where: W_out = W_{in} * scale_{factor} Parameters: input(Variable): 3-D Tensor(NCW), its data type is float32, float64, or uint8, its data format is specified by :attr:`data_format`. out_shape(list|tuple|Variable|None): Output shape of resize linear layer, the shape is (out_w,). Default: None. If a list, each element can be an integer or a Tensor Variable with shape: [1]. If a Tensor Variable, its dimension size should be 1. scale(float|Variable|None): The multiplier for the input height or width. At least one of :attr:`out_shape` or :attr:`scale` must be set. 
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
             Default: None.
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
                                :attr:`out_shape` and :attr:`scale` specifying
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
                                errors would be occurred in graph constructing stage.
                                Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`.
            The default is `"NCW"`. When it is `"NCW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_width]`.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: 3-D tensor(NCW or NWC).

    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[None,3,100])
            output = fluid.layers.resize_linear(input=input,out_shape=[50,])

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(1,3,100).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            # (1, 3, 50)

            #imperative mode
            import paddle.fluid.dygraph as dg
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                output = fluid.layers.resize_linear(input=input, out_shape=[50,])
                print(output.shape)
                # [1L, 3L, 50L]
    """
    # Thin wrapper: all shape validation and op construction is delegated to
    # image_resize with the 'LINEAR' resample method.
    return image_resize(input, out_shape, scale, name, 'LINEAR', actual_shape,
                        align_corners, align_mode, data_format)


@templatedoc(op_type="bilinear_interp")
def resize_bilinear(input,
                    out_shape=None,
                    scale=None,
                    name=None,
                    actual_shape=None,
                    align_corners=True,
                    align_mode=1,
                    data_format='NCHW'):
    """
    This op resizes the input by performing bilinear interpolation based on given
    output shape which is specified by actual_shape, out_shape and scale
    in priority order.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated in
    the future and only use :attr:`out_shape` instead.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation

    Align_corners and align_mode are optional parameters, the calculation method
    of interpolation can be selected by them.

    Example:

      .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :

              scale_factor = (in_size-1.0)/(out_size-1.0)

            else:

              scale_factor = float(in_size/out_size)

        Bilinear interpolation:

          if:
              align_corners = False , align_mode = 0

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:

              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    Parameters:
        input(Variable): 4-D Tensor(NCHW), its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape of resize bilinear
            layer, the shape is (out_h, out_w).Default: None. If a list, each
            element can be an integer or a Tensor Variable with shape: [1]. If a
            Tensor Variable, its dimension size should be 1.
        scale(float|Variable|None): The multiplier for the input height or width. At
             least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
             Default: None.
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
                                :attr:`out_shape` and :attr:`scale` specifying
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
                                errors would be occurred in graph constructing stage.
                                Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: 4-D tensor(NCHW or NHWC).

    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()
            input = fluid.data(name="input", shape=[None,3,6,10])

            #1
            output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])

            #2
            #x = np.array([2]).astype("int32")
            #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
            #fluid.layers.assign(input=x, output=dim1)
            #output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1])

            #3
            #x = np.array([3,12]).astype("int32")
            #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
            #fluid.layers.assign(input=x, output=shape_tensor)
            #output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor)

            #4
            #x = np.array([0.5]).astype("float32")
            #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
            #fluid.layers.assign(x,scale_tensor)
            #output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3,6,10).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            #1
            # (2, 3, 12, 12)
            #2
            # (2, 3, 12, 2)
            #3
            # (2, 3, 3, 12)
            #4
            # (2, 3, 3, 5)

            #imperative mode
            import paddle.fluid.dygraph as dg
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12])
                print(output.shape)
                # [2L, 3L, 12L, 12L]
    """
    # Thin wrapper: delegates validation and op creation to image_resize with
    # the 'BILINEAR' resample method.
    return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
                        align_corners, align_mode, data_format)
@templatedoc(op_type="trilinear_interp")
def resize_trilinear(input,
                     out_shape=None,
                     scale=None,
                     name=None,
                     actual_shape=None,
                     align_corners=True,
                     align_mode=1,
                     data_format='NCDHW'):
    """
    This op resizes the input by performing trilinear interpolation based on given
    output shape which is specified by actual_shape, out_shape and scale
    in priority order.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated
    in the future and only use :attr:`out_shape` instead.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation

    Align_corners and align_mode are optional parameters, the calculation method
    of interpolation can be selected by them.

    Example:

      .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :

              scale_factor = (in_size-1.0)/(out_size-1.0)

            else:

              scale_factor = float(in_size/out_size)

        Trilinear interpolation:

          if:
              align_corners = False , align_mode = 0

              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:

              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:

              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:

              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    Parameters:
        input(${x_type}): 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_d, out_h, out_w). Default: None. Every element should be an integer or a Tensor Variable with shape: [1] if it is a list. If it is a Tensor Variable, its dimension size should be 1.
        scale(float|Variable|None): The multiplier for the input depth, height or width.
             At least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
             Default: None.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
                                :attr:`out_shape` and :attr:`scale` specifying
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
                                errors would be occurred in graph constructing stage.
                                Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        Variable: A 5-D Tensor(NCDHW or NDHWC)

    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import paddle
            import numpy as np
            paddle.enable_static()
            input = fluid.data(name="input", shape=[None,3,6,8,10])

            #1
            output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])

            #2
            #x = np.array([2]).astype("int32")
            #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
            #fluid.layers.assign(input=x, output=dim1)
            #output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4])

            #3
            #x = np.array([3,12,12]).astype("int32")
            #shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32")
            #fluid.layers.assign(input=x, output=shape_tensor)
            #output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor)

            #4
            #x = np.array([0.5]).astype("float32")
            #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
            #fluid.layers.assign(x,scale_tensor)
            #output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3,6,8,10).astype("float32")

            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            #1
            # (2, 3, 12, 12, 12)
            #2
            # (2, 3, 12, 2, 4)
            #3
            # (2, 3, 3, 12, 12)
            #4
            # (2, 3, 3, 4, 5)

            #imperative mode
            import paddle.fluid.dygraph as dg
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12])
                print(output.shape)
                # [2L, 3L, 12L, 12L, 12L]
    """
    # Thin wrapper: delegates validation and op creation to image_resize with
    # the 'TRILINEAR' resample method.
    return image_resize(input, out_shape, scale, name, 'TRILINEAR',
                        actual_shape, align_corners, align_mode, data_format)


@templatedoc(op_type="nearest_interp")
def resize_nearest(input,
                   out_shape=None,
                   scale=None,
                   name=None,
                   actual_shape=None,
                   align_corners=True,
                   data_format='NCHW'):
    """
    This op resizes the input by performing nearest neighbor interpolation in both the
    height direction and the width direction based on given output shape
which is specified by actual_shape, out_shape and scale in priority order. **Warning:** the parameter :attr:`actual_shape` will be deprecated in the future and only use :attr:`out_shape` instead. Example: .. code-block:: text For scale: if align_corners = True && out_size > 1 : scale_factor = (in_size-1.0)/(out_size-1.0) else: scale_factor = float(in_size/out_size) Nearest neighbor interpolation: if: align_corners = False input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = floor(H_{in} * scale_{factor}) W_out = floor(W_{in} * scale_{factor}) else: align_corners = True input : (N,C,H_in,W_in) output: (N,C,H_out,W_out) where: H_out = round(H_{in} * scale_{factor}) W_out = round(W_{in} * scale_{factor}) For details of nearest neighbor interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation Parameters: input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8, its data format is specified by :attr:`data_format`. out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_h, out_w). Default: None. Every element should be an integer or a tensor Variable with shape: [1] if it is a list. If it is a tensor Variable, its dimension size should be 1. scale(float|Variable|None): The multiplier for the input height or width. At least one of :attr:`out_shape` or :attr:`scale` must be set. And :attr:`out_shape` has a higher priority than :attr:`scale`. Default: None. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` actual_shape(Variable): An optional input to specify output shape dynamically. If provided, image resize according to this given shape rather than :attr:`out_shape` and :attr:`scale` specifying shape. That is to say actual_shape has the highest priority. 
It is recommended to use :attr:`out_shape` if you want to specify output shape dynamically, because :attr:`actual_shape` will be deprecated. When using actual_shape to specify output shape, one of :attr:`out_shape` and :attr:`scale` should also be set, otherwise errors would be occurred in graph constructing stage. Default: None align_corners(bool): ${align_corners_comment} data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`. Returns: Variable: 4-D tensor(NCHW or NHWC). Examples: .. code-block:: python #declarative mode import paddle.fluid as fluid import numpy as np import paddle paddle.enable_static() input = fluid.data(name="input", shape=[None,3,6,10]) #1 output = fluid.layers.resize_nearest(input=input,out_shape=[12,12]) #2 #x = np.array([2]).astype("int32") #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") #fluid.layers.assign(input=x, output=dim1) #output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1]) #3 #x = np.array([3,12]).astype("int32") #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32") #fluid.layers.assign(input=x, output=shape_tensor) #output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor) #4 #x = np.array([0.5]).astype("float32") #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32") #fluid.layers.assign(x,scale_tensor) #output = fluid.layers.resize_nearest(input=input,scale=scale_tensor) place = fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) input_data = np.random.rand(2,3,6,10).astype("float32") output_data = exe.run(fluid.default_main_program(), feed={"input":input_data}, fetch_list=[output], return_numpy=True) print(output_data[0].shape) #1 # (2, 3, 12, 12) 
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)

            #imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                output = fluid.layers.resize_nearest(input=input, out_shape=[12,12])
                print(output.shape)

                # [2L, 3L, 12L, 12L]

    """

    return image_resize(
        input,
        out_shape,
        scale,
        name,
        'NEAREST',
        actual_shape,
        align_corners,
        align_mode=1,
        data_format=data_format)


def image_resize_short(input, out_short_len, resample='BILINEAR'):
    """
    This op resizes a batch of images. The short edge of input images will be
    resized to the given 'out_short_len'. The long edge of input images
    will be resized proportionately to make images' length-width ratio
    constant.

    Parameters:
        input (Variable): 4-D tensor(NCHW), The input tensor of image resize layer.
        out_short_len(int): The length of output images' short edge.
        resample (str): resample method, default: BILINEAR.

    Returns:
        Variable: 4-D tensor(NCHW).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
            out = fluid.layers.image_resize_short(input, out_short_len=3)
    """
    in_shape = input.shape
    if len(in_shape) != 4:
        raise ValueError(
            "The rank of input must be 4 (num_batches, channels, in_h, in_w).")
    # hw holds the spatial dims (H, W); find the short edge and rescale the
    # long edge by the same factor, rounding to the nearest integer (+ 0.5).
    # NOTE(review): assumes static, positive H/W — a -1/None spatial dim
    # would break min()/the arithmetic below; confirm callers guarantee this.
    hw = in_shape[2:4]
    short_idx = hw.index(min(hw))
    long_idx = 1 - short_idx
    out_shape = list(hw)
    out_shape[short_idx] = out_short_len
    out_shape[long_idx] = int(
        float(out_shape[long_idx]) * (float(out_short_len) / float(hw[
            short_idx])) + 0.5)
    return image_resize(input=input, out_shape=out_shape, resample=resample)


@deprecated(since="2.0.0", update_to="paddle.gather")
def gather(input, index, overwrite=True):
    """
    Output is obtained by gathering entries of the outer-most dimension
    of X indexed by `index` and concatenate them together.

    .. math::

        Out = X[Index]

    .. code-block:: text

                Given:

                X = [[1, 2],
                     [3, 4],
                     [5, 6]]

                Index = [1, 2]

                Then:

                Out = [[3, 4],
                       [5, 6]]

    Args:
        input (Tensor): The source input tensor with rank>=1.
            Supported data type is int32, int64, float32, float64 and uint8 (only for CPU),
            float16 (only for GPU).
        index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.
        overwrite (bool, optional): The mode that updating the grad when has same index.
            If True, use the overwrite mode to update the grad of the same index,
            if False, use the accumulate mode to update the grad of the same index.
            Default value is True.

    Returns:
        output (Tensor): The output is a tensor with the same rank as input.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
            index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
            output = fluid.layers.gather(x, index)
    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.gather(input, index, None, 'overwrite', overwrite)

    # Static-graph path: validate dtypes, then append a `gather` op.
    check_variable_and_dtype(
        input, 'x',
        ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'gather')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
    helper = LayerHelper('gather', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="gather",
        inputs={"X": input,
                "Index": index},
        outputs={"Out": out},
        attrs={'overwrite': overwrite})
    return out


@deprecated(since="2.0.0", update_to="paddle.gather_nd")
def gather_nd(input, index, name=None):
    """
    **Gather Nd Layer**

    This function is actually a high-dimensional extension of :code:`gather`
    and supports for simultaneous indexing by multiple axes. :attr:`index` is a
    K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
    tensor of :attr:`index` into :attr:`input`, where each element defines
    a slice of params:

    .. math::

        output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]

    Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
    shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .

    ..
code-block:: text Given: input = [[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]] input.shape = (2, 3, 4) * Case 1: index = [[1]] gather_nd(input, index) = [input[1, :, :]] = [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]] * Case 2: index = [[0,2]] gather_nd(input, index) = [input[0, 2, :]] = [8, 9, 10, 11] * Case 3: index = [[1, 2, 3]] gather_nd(input, index) = [input[1, 2, 3]] = [23] Args: input (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64. index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank. Its dtype should be int32, int64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:] Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32') index = fluid.data(name='index', shape=[2, 2], dtype='int32') output = fluid.layers.gather_nd(x, index) """ if in_dygraph_mode(): return _C_ops.gather_nd(input, index) check_variable_and_dtype(input, 'input', ['bool', 'float32', 'float64', 'int32', 'int64'], 'gather_np') check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np') helper = LayerHelper('gather_nd', **locals()) dtype = helper.input_dtype() output = helper.create_variable_for_type_inference(dtype) helper.append_op( type="gather_nd", inputs={"X": input, "Index": index}, outputs={"Out": output}) return output @deprecated(since="2.0.0", update_to="paddle.scatter") def scatter(input, index, updates, name=None, overwrite=True): """ :alias_main: paddle.scatter :alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter :old_api: paddle.fluid.layers.scatter **Scatter Layer** Output is obtained by 
updating the input on selected indices based on updates. .. code-block:: python import numpy as np #input: input = np.array([[1, 1], [2, 2], [3, 3]]) index = np.array([2, 1, 0, 1]) # shape of updates should be the same as input # shape of updates with dim > 1 should be the same as input updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]) overwrite = False # calculation: if not overwrite: for i in range(len(index)): input[index[i]] = np.zeros((2)) for i in range(len(index)): if (overwrite): input[index[i]] = updates[i] else: input[index[i]] += updates[i] # output: out = np.array([[3, 3], [6, 6], [1, 1]]) out.shape # [3, 2] Args: input (Variable): The input N-D Tensor with rank>=1. Data type can be float32. index (Variable): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length. updates (Variable): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . overwrite (bool): The mode that updating the output when there are same indices. If True, use the overwrite mode to update the output of the same index, if False, use the accumulate mode to update the output of the same index. Default value is True. Returns: Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input. Examples: .. 
code-block:: python import paddle import numpy as np import paddle.fluid as fluid paddle.enable_static() input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False) index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False) updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False) output = fluid.layers.scatter(input, index, updates, overwrite=False) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) index_data = np.array([2, 1, 0, 1]).astype(np.int64) update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32) res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output]) print(res) # [array([[3., 3.], # [6., 6.], # [1., 1.]], dtype=float32)] """ helper = LayerHelper('scatter', **locals()) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="scatter", inputs={"X": input, "Ids": index, "Updates": updates}, attrs={'overwrite': overwrite}, outputs={"Out": out}) return out def scatter_nd_add(ref, index, updates, name=None): r""" **Scatter_nd_add Layer** Output is obtained by applying sparse addition to a single value or slice in a Variable. :attr:`ref` is a Tensor with rank :math:`R` and :attr:`index` is a Tensor with rank :math:`K` . Thus, :attr:`index` has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates` is a Tensor with rank :math:`K - 1 + R - Q` and its shape is :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` . According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` , add the corresponding :attr:`updates` slice to the :attr:`ref` slice which is obtained by the last one dimension of :attr:`index` . .. 
code-block:: text Given: * Case 1: ref = [0, 1, 2, 3, 4, 5] index = [[1], [2], [3], [1]] updates = [9, 10, 11, 12] we get: output = [0, 22, 12, 14, 4, 5] * Case 2: ref = [[65, 17], [-14, -25]] index = [[], []] updates = [[[-1, -2], [1, 2]], [[3, 4], [-3, -4]]] ref.shape = (2, 2) index.shape = (2, 0) updates.shape = (2, 2, 2) we get: output = [[67, 19], [-16, -27]] Args: ref (Variable): The ref input. Its dtype should be int32, int64, float32, float64. index (Variable): The index input with rank > 1 and index.shape[-1] <= ref.rank. Its dtype should be int32 or int64 as it is used as indexes. updates (Variable): The updated value of scatter_nd_add op, and it must have the same dtype as ref. It must have the shape index.shape[:-1] + ref.shape[index.shape[-1]:]. name (str|None): The output variable name. If set None, the layer will be named automatically. Returns: output (Variable): The output is a tensor with the same shape and dtype as ref. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32') index = fluid.data(name='index', shape=[3, 2], dtype='int32') updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32') output = fluid.layers.scatter_nd_add(ref, index, updates) """ if in_dygraph_mode(): op = getattr(_C_ops, 'scatter_nd_add') return op(ref, index, updates) if ref.dtype != updates.dtype: raise ValueError("ref and updates must have same data type.") helper = LayerHelper('scatter_nd_add', **locals()) dtype = helper.input_dtype(input_param_name='ref') output = helper.create_variable_for_type_inference(dtype) helper.append_op( type="scatter_nd_add", inputs={"X": ref, "Index": index, "Updates": updates}, outputs={"Out": output}) return output def scatter_nd(index, updates, shape, name=None): """ **Scatter_nd Layer** Output is obtained by scattering the :attr:`updates` in a new tensor according to :attr:`index` . 
This op is similar to :code:`scatter_nd_add`, except the tensor of :attr:`shape` is zero-initialized. Correspondingly, :code:`scatter_nd(index, updates, shape)` is equal to :code:`scatter_nd_add(paddle.zeros(shape, updates.dtype), index, updates)` . If :attr:`index` has repeated elements, then the corresponding updates are accumulated. Because of the numerical approximation issues, the different order of repeated elements in :attr:`index` may cause different results. The specific calculation method can be seen :code:`scatter_nd_add` . This op is the inverse of the :code:`gather_nd` op. Args: index (Tensor): The index input with ndim > 1 and index.shape[-1] <= len(shape). Its dtype should be int32 or int64 as it is used as indexes. updates (Tensor): The updated value of scatter_nd op. Its dtype should be float32, float64. It must have the shape index.shape[:-1] + shape[index.shape[-1]:] shape(tuple|list): Shape of output tensor. name (str|None): The output Tensor name. If set None, the layer will be named automatically. Returns: output (Tensor): The output is a tensor with the same type as :attr:`updates` . Examples: .. code-block:: python import paddle import numpy as np index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64) index = paddle.to_tensor(index_data) updates = paddle.rand(shape=[3, 9, 10], dtype='float32') shape = [3, 5, 9, 10] output = paddle.scatter_nd(index, updates, shape) """ return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name) @templatedoc() def random_crop(x, shape, seed=None): """ ${comment} Args: x(${x_type}): ${x_comment} shape(${shape_type}): ${shape_comment} seed(int|${seed_type}|None): ${seed_comment} By default, the seed will get from `random.randint(-65536, 65535)`. Returns: ${out_comment} Examples: .. 
code-block:: python

            import paddle.fluid as fluid
            img = fluid.data("img", [None, 3, 256, 256])

            # cropped_img is [-1, 3, 224, 224]
            cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224])

            # cropped_img2 shape: [-1, 2, 224, 224]
            # cropped_img2 = fluid.layers.random_crop(img, shape=[2, 224, 224])

            # cropped_img3 shape: [-1, 3, 128, 224]
            # cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224])

    """
    helper = LayerHelper("random_crop", **locals())
    check_variable_and_dtype(x, 'x',
                             ['float32', 'float64', 'uint8', 'int16', 'int32'],
                             'random_crop')
    check_type(shape, 'shape', (list, Variable), 'random_crop')
    dtype = x.dtype
    out = helper.create_variable_for_type_inference(dtype)
    # No seed given: draw a random startup seed once at build time.
    if seed is None:
        seed = np.random.randint(-65536, 65536)
    op_attrs = {"shape": shape}
    # A plain int seed is promoted to a persistable int64 Variable so the op
    # can update it in place via the SeedOut output between runs.
    if isinstance(seed, int):
        op_attrs["startup_seed"] = seed
        seed = helper.create_variable(
            name=unique_name.generate("random_crop_seed"),
            dtype="int64",
            persistable=True)
    elif not isinstance(seed, Variable):
        raise ValueError("'seed' must be a Variable or an int.")
    helper.append_op(
        type="random_crop",
        inputs={"X": x,
                "Seed": seed},
        outputs={"Out": out,
                 "SeedOut": seed},
        attrs=op_attrs)
    return out


def log(x, name=None):
    r"""
    Calculates the natural log of the given input tensor, element-wise.

    .. math::

        Out = \\ln(x)

    Args:
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
        name (str|None): The default value is None. Normally there is no need
            for user to set this property. For more information, please refer
            to :ref:`api_guide_Name`

    Returns:
        Tensor: The natural log of the input Tensor computed element-wise.

    Examples:

        .. code-block:: python

            import paddle

            x = [[2,3,4], [7,8,9]]
            x = paddle.to_tensor(x, dtype='float32')
            res = paddle.log(x)
            # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.log(x)

    # Static-graph path: validate dtype, then append a `log` op.
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
    inputs = {'X': [x]}
    helper = LayerHelper('log', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
def relu(x, name=None):
    """
    ${comment}

    Args:
        x(Variable): ${x_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[-1,0],[1,2.6]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu(x1)
                print(out1.numpy())
                # [[0.  0. ]
                #  [1.  2.6]]
    """
    # Dygraph fast path: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.relu(x)

    # Static-graph path: validate dtype, then append a `relu` op.
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')

    inputs = {'X': [x]}
    helper = LayerHelper('relu', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
def selu(x, scale=None, alpha=None, name=None):
    r"""
    Selu Operator.

    The equation is:

    .. math::

        selu= \\lambda*
        \\begin{cases}
            x                      &\\quad \\text{ if } x>0 \n
            \\alpha * e^x - \\alpha  &\\quad \\text{ if } x<=0
        \\end{cases}

    The input `X` can carry the LoD (Level of Details) information,
    or not. And the output shares the LoD information with input `X`.

    Args:
        x (Variable): The input N-D Tensor.
scale(float, optional): lambda in selu activation function, the default value is 1.0507009873554804934193349852946. For more information about this value, please refer to: https://arxiv.org/abs/1706.02515. alpha(float, optional): alpha in selu activation function, the default value is 1.6732632423543772848170429916717. For more information about this value, please refer to: https://arxiv.org/abs/1706.02515. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the same shape and LoD information as input. Examples: .. code-block:: python import paddle import paddle.fluid as fluid import numpy as np paddle.enable_static() inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32") output = fluid.layers.selu(inputs) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) img = np.array([[0, 1],[2, 3]]).astype(np.float32) res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) # [array([[0. , 1.050701],[2.101402, 3.152103]], dtype=float32)] """ check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu') helper = LayerHelper('selu', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) attrs = {} if scale is not None: attrs["scale"] = scale if alpha is not None: attrs["alpha"] = alpha helper.append_op( type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs) return out def mean_iou(input, label, num_classes): r""" Mean Intersection-Over-Union is a common evaluation metric for semantic image segmentation, which first computes the IOU for each semantic class and then computes the average over classes. IOU is defined as follows: .. math:: IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}. 
The predictions are accumulated in a confusion matrix and mean-IOU is then calculated from it. Parameters: input (Tensor): A n-D Tensor of prediction results for semantic labels with type int32 or int64. label (Tensor): A Tensor of ground truth labels with type int32 or int64. Its shape should be the same as input. num_classes (int32): The possible number of labels. Returns: Three Tensors. - mean_iou(Tensor) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \ Data type is float32. - out_wrong(Tensor) : A 1-D Tensor with shape [num_classes]. Data type is int32. \ The wrong numbers of each class. - out_correct(Tensor): A 1-D Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class. Examples: .. code-block:: python import paddle iou_shape = [64, 32, 32] num_classes = 5 predict = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64') label = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64') mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes) """ if in_dygraph_mode(): return _C_ops.mean_iou(input, label, 'num_classes', num_classes) helper = LayerHelper('mean_iou', **locals()) check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'], 'mean_iou') check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou') out_mean_iou = helper.create_variable_for_type_inference(dtype='float32') out_wrong = helper.create_variable_for_type_inference(dtype='int32') out_correct = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type="mean_iou", inputs={"Predictions": input, "Labels": label}, outputs={ "OutMeanIou": out_mean_iou, "OutWrong": out_wrong, "OutCorrect": out_correct }, attrs={"num_classes": num_classes}) return out_mean_iou, out_wrong, out_correct def crop(x, shape=None, offsets=None, name=None): """ Crop input into output, as specified by offsets and shape. **Warning:** THIS OP IS DEPRECATED. 
It will be removed in the future version. Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead. .. code-block:: text * Case 1: Given X = [[0, 1, 2, 0, 0] [0, 3, 4, 0, 0] [0, 0, 0, 0, 0]], and shape = [2, 2], offsets = [0, 1], output is: Out = [[1, 2], [3, 4]]. * Case 2: Given X = [[0, 1, 2, 5, 0] [0, 3, 4, 6, 0] [0, 0, 0, 0, 0]], and shape is tensor shape = [[0, 0, 0] [0, 0, 0]] and offsets = [0, 1], output is: Out = [[1, 2, 5], [3, 4, 6]]. Parameters: x (Variable): Tensor, data type can be float32 or float64. shape (Variable|list/tuple of integers): The output shape is specified by `shape`, which can be a Tensor or a list/tuple of integers. If it is a Tensor, it's rank must be the same as `x` , only it's shape will be used, and the value of it will be ignored. This way is suitable for the case that the output shape may be changed each iteration. If it is a list/tuple of integers, it's length must be the same as the rank of `x` offsets (Variable|list/tuple of integers|None): Specifies the cropping offsets at each dimension. It can be a Tensor or a list/tuple of integers. If it is a Tensor, it's rank must be the same as `x`. This way is suitable for the case that the offsets may be changed each iteration. If it is a list/tuple of integers, it's length must be the same as the rank of `x`. If None, the offsets are 0 at each dimension. name(str, optional): For detailed information, please refer to :ref:`api_guide_Name` . Usually name is no need to set and None by default. Returns: The cropped Tensor, which has the same rank and data type with `x` Return Type: Variable Raises: ValueError: If shape is not a list, tuple or Variable. Examples: .. 
code-block:: python import paddle.fluid as fluid import paddle.fluid as fluid import paddle paddle.enable_static() x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32") y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32") crop = fluid.layers.crop(x, shape=y) # or z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32") crop = fluid.layers.crop(z, shape=[2, 2, 3]) """ check_variable_and_dtype(x, 'x', ['float32'], 'crop') check_type(shape, 'shape', (list, tuple, Variable), 'crop') helper = LayerHelper('crop', **locals()) if offsets is None: offsets = [0] * len(x.shape) out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x} attrs = {} if isinstance(shape, Variable): ipts['Y'] = shape else: attrs['shape'] = shape if isinstance(offsets, Variable): ipts['Offsets'] = offsets else: attrs['offsets'] = offsets helper.append_op( type='crop', inputs=ipts, outputs={'Out': out}, attrs=None if len(attrs) == 0 else attrs) return out def crop_tensor(x, shape=None, offsets=None, name=None): """ Crop input into output, as specified by offsets and shape. .. code-block:: text * Case 1 (input is a 2-D Tensor): Input: X.shape = [3, 5] X.data = [[0, 1, 2, 0, 0], [0, 3, 4, 0, 0], [0, 0, 0, 0, 0]] Parameters: shape = [2, 2] offsets = [0, 1] Output: Out.shape = [2, 2] Out.data = [[1, 2], [3, 4]] * Case 2 (input is a 3-D Tensor): Input: X.shape = [2, 3, 4] X.data = [[[0, 1, 2, 3], [0, 5, 6, 7], [0, 0, 0, 0]], [[0, 3, 4, 5], [0, 6, 7, 8], [0, 0, 0, 0]]] Parameters: shape = [2, 2, -1] offsets = [0, 0, 1] Output: Out.shape = [2, 2, 3] Out.data = [[[1, 2, 3], [5, 6, 7]], [[3, 4, 5], [6, 7, 8]]] Parameters: x (Tensor): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64. shape (list|tuple|Tensor): The output shape is specified by `shape`. Its data type is int32. If a list/tuple, it's length must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor. 
When it is a list, each element can be an integer or a Tensor of shape: [1]. If Variable contained, it is suitable for the case that the shape may be changed each iteration. offsets (list|tuple|Variable, optional): Specifies the cropping offsets at each dimension. Its data type is int32. If a list/tuple, it's length must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1]. If Variable contained, it is suitable for the case that the offsets may be changed each iteration. Default: None, the offsets are 0 at each dimension. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Tensor: The cropped Tensor has same data type with `x`. Examples: .. code-block:: python :name: code-example1 import paddle x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) # x.shape = [3, 3] # x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # shape can be a 1-D Tensor or list or tuple. shape = paddle.to_tensor([2, 2], dtype='int32') # shape = [2, 2] # shape = (2, 2) out = paddle.crop(x, shape) # out.shape = [2, 2] # out = [[1,2], [4,5]] # offsets can be a 1-D Tensor or list or tuple. 
offsets = paddle.to_tensor([0, 1], dtype='int32') # offsets = [1, 0] # offsets = (1, 1) out = paddle.crop(x, shape, offsets) # out.shape = [2, 2] # if offsets = [0, 0], out = [[1,2], [4,5]] # if offsets = [0, 1], out = [[2,3], [5,6]] # if offsets = [1, 0], out = [[4,5], [7,8]] # if offsets = [1, 1], out = [[5,6], [8,9]] """ helper = LayerHelper('crop_tensor', **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor') check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor') check_type(offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor') if offsets is None: offsets = [0] * len(x.shape) out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x} attrs = {} def _attr_shape_check(shape_val): if not isinstance(shape_val, int): raise TypeError( "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s." % type(shape_val)) if shape_val == 0: raise ValueError( "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s." % str(shape_val)) if shape_val < -1: raise ValueError( "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s." % str(shape_val)) def _attr_offsets_check(offset_val): if not isinstance(offset_val, int): raise TypeError( "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s." % type(offset_val)) if offset_val < 0: raise ValueError( "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s." 
% str(offset_val)) if isinstance(offsets, Variable): offsets.stop_gradient = True ipts['Offsets'] = offsets attrs['offsets'] = [-1] * len(x.shape) elif utils._contain_var(offsets): new_offsets_tensor = [] offsets_attr = [] for dim in offsets: if isinstance(dim, Variable): dim.stop_gradient = True new_offsets_tensor.append(dim) offsets_attr.append(-1) else: _attr_offsets_check(dim) temp_out = helper.create_variable_for_type_inference('int32') fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out) new_offsets_tensor.append(temp_out) offsets_attr.append(dim) ipts['OffsetsTensor'] = new_offsets_tensor attrs['offsets'] = offsets_attr else: for offset in offsets: _attr_offsets_check(offset) attrs['offsets'] = offsets if isinstance(shape, Variable): shape.stop_gradient = True ipts['Shape'] = shape elif utils._contain_var(shape): new_shape_tensor = [] shape_attr = [] for dim_size in shape: if isinstance(dim_size, Variable): dim_size.stop_gradient = True new_shape_tensor.append(dim_size) shape_attr.append(0) else: _attr_shape_check(dim_size) temp_out = helper.create_variable_for_type_inference('int32') fill_constant( [1], 'int32', dim_size, force_cpu=True, out=temp_out) new_shape_tensor.append(temp_out) shape_attr.append(dim_size) ipts['ShapeTensor'] = new_shape_tensor attrs['shape'] = shape_attr else: for dim_size in shape: _attr_shape_check(dim_size) attrs['shape'] = shape helper.append_op( type='crop_tensor', inputs=ipts, outputs={'Out': out}, attrs=None if len(attrs) == 0 else attrs) return out def affine_grid(theta, out_shape, name=None): """ :alias_main: paddle.nn.functional.affine_grid :alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid :old_api: paddle.fluid.layers.affine_grid It generates a grid of (x,y) coordinates using the parameters of the affine transformation that correspond to a set of points where the input feature map should be sampled to produce the transformed output feature map. 
Args: theta (Variable) - A Tensor with shape [N, 2, 3]. It contains a batch of affine transform parameters. The data type can be float32 or float64. out_shape (Variable | list | tuple): The shape of target output with format [batch_size, channel, height, width]. ``out_shape`` can be a Tensor or a list or tuple. The data type must be int32. name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Variable: A Tensor with shape [batch_size, H, W, 2] while 'H' and 'W' are the height and width of feature map in affine transformation. The data type is the same as `theta`. Raises: ValueError: If the type of arguments is not supported. Examples: .. code-block:: python import paddle.fluid as fluid import numpy as np place = fluid.CPUPlace() theta = fluid.data(name="x", shape=[None, 2, 3], dtype="float32") out_shape = fluid.data(name="y", shape=[4], dtype="int32") grid_0 = fluid.layers.affine_grid(theta, out_shape) grid_1 = fluid.layers.affine_grid(theta, [5, 3, 28, 28]) batch_size=2 exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) output= exe.run(feed={"x": np.random.rand(batch_size,2,3).astype("float32"), "y": np.array([5, 3, 28, 28]).astype("int32")}, fetch_list=[grid_0.name, grid_1.name]) print(output[0]) print(output[1]) """ helper = LayerHelper('affine_grid') check_variable_and_dtype(theta, 'theta', ['float32', 'float64'], 'affine_grid') if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \ isinstance(out_shape, Variable)): raise ValueError("The out_shape should be a list, tuple or Variable.") if not isinstance(theta, Variable): raise ValueError("The theta should be a Variable.") out = helper.create_variable_for_type_inference(theta.dtype) ipts = {'Theta': theta} attrs = {} if isinstance(out_shape, Variable): ipts['OutputShape'] = out_shape check_variable_and_dtype(out_shape, 'out_shape', ['int32'], 'affine_grid') else: 
        attrs['output_shape'] = out_shape
    if core.is_compiled_with_rocm():
        # ROCM platform do not have MIOPEN kernel for affine_grid
        attrs['use_cudnn'] = False

    helper.append_op(
        type='affine_grid',
        inputs=ipts,
        outputs={'Output': out},
        # Pass None instead of an empty dict so the op keeps its proto
        # defaults when no attribute was explicitly set.
        attrs=None if len(attrs) == 0 else attrs)
    return out


def pad2d(input,
          paddings=[0, 0, 0, 0],
          mode='constant',
          pad_value=0.0,
          data_format="NCHW",
          name=None):
    """
    Pad 2-d images according to 'paddings' and 'mode'.
    If mode is 'reflect', paddings[0] and paddings[1] must be no greater
    than height-1. And the width dimension has the same condition.

    Parameters:
        input (Tensor): The input image with [N, C, H, W] format or [N, H, W, C] format, which is a 4-D Tensor with data type float32.
        paddings (Tensor | List[int32]): The padding size. If padding is a List, it must
            contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
            Otherwise, it is a 1-D Tensor with shape [4]. Data type is int32.
            Default is [0, 0, 0, 0].
        mode (str): Three modes: 'constant' (default), 'reflect', 'edge' .
            When in 'constant' mode, this op uses a constant value to pad the input tensor.
            When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
            When in 'edge' mode, uses input boundaries to pad the input tensor.
            Default is 'constant'
        pad_value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0
        data_format (str): An string from: "NHWC", "NCHW". Specify the data format of
                           the input data.
                           Default is "NCHW"
        name (str, optional) : The default value is None. Normally there is no need for
                    user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor, a 4-D Tensor padded according to paddings and mode and data type is same as input.

    Examples:
        .. code-block:: text

            Input = [[[[1., 2., 3.],
                       [4., 5., 6.]]]]

            Case 0:
                paddings = [0, 1, 2, 3],
                mode = 'constant'
                pad_value = 0
                Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
                         [0., 0., 4., 5., 6., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0., 0.]]]]

            Case 1:
                paddings = [0, 1, 2, 1],
                mode = 'reflect'
                Out = [[[[3., 2., 1., 2., 3., 2.],
                         [6., 5., 4., 5., 6., 5.],
                         [3., 2., 1., 2., 3., 2.]]]]

            Case 2:
                paddings = [0, 1, 2, 1],
                mode = 'edge'
                Out = [[[[1., 1., 1., 2., 3., 3.],
                         [4., 4., 4., 5., 6., 6.],
                         [4., 4., 4., 5., 6., 6.]]]]

    Code Examples:
        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn.functional as F

            # example 1
            x_shape = (1, 1, 3, 4)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
            y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 2, 2, 1], pad_value=1, mode='constant')
            print(y.numpy())
            # [[[[ 1.  1.  1.  1.  1.  1.  1.]
            #    [ 1.  1.  1.  2.  3.  4.  1.]
            #    [ 1.  1.  5.  6.  7.  8.  1.]
            #    [ 1.  1.  9. 10. 11. 12.  1.]
            #    [ 1.  1.  1.  1.  1.  1.  1.]
            #    [ 1.  1.  1.  1.  1.  1.  1.]]]]

            # example 2
            x_shape = (1, 1, 2, 3)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
            y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 1, 1, 1], mode='reflect')
            print(y.numpy())
            # [[[[5. 4. 5. 6. 5.]
            #    [2. 1. 2. 3. 2.]
            #    [5. 4. 5. 6. 5.]
            #    [2. 1. 2. 3. 2.]]]]
    """
    # NOTE(review): ``paddings`` uses a mutable list as its default value.
    # Harmless here because it is only read, never mutated in place, but it
    # cannot be changed without breaking the public signature.
    if in_dygraph_mode():
        # Dygraph fast path: a Variable ``paddings`` must be lowered to a
        # plain python list before the eager C++ op can consume it.
        _paddings = paddings.numpy().tolist() if isinstance(
            paddings, Variable) else paddings
        return _C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
                            'data_format', data_format, 'paddings', _paddings)

    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        "pad2d")

    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
    inputs = {'X': [input]}
    if isinstance(paddings, Variable):
        # A tensor ``paddings`` is wired as an op *input*; the attribute must
        # then still be present, but empty.
        inputs['Paddings'] = [paddings]
        attrs['paddings'] = []
    else:
        attrs['paddings'] = paddings

    helper = LayerHelper('pad2d', **locals())

    assert mode in ['reflect', 'edge', 'constant'
                    ], "mode should be one of constant, reflect, edge."
    dtype = helper.input_dtype(input_param_name='input')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)

    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.elu")
def elu(x, alpha=1.0, name=None):
    """
    :alias_main: paddle.nn.functional.elu
    :alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu
    :old_api: paddle.fluid.layers.elu

    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        alpha(${alpha_type}|1.0): ${alpha_comment}
        name(str|None): The default value is None. Normally there is no need for user to set this property.
                        For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        ${out_type}: ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            input_elu = np.array([[-1,6],[1,15.6]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(input_elu)
                y = fluid.layers.elu(x, alpha=0.2)
                print(y.numpy())
                # [[-0.12642411  6.        ]
                #  [ 1.         15.6       ]]
    """
    helper = LayerHelper('elu', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='elu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': alpha})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
def relu6(x, threshold=6.0, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        threshold(float, optional): ${threshold_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        output(${out_type}): ${out_comment}

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[-1,0],[2.5,7.8]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu6(x=x1, threshold=6.0)
                print(out1.numpy())
                # [[0.  0. ]
                #  [2.5 6. ]]
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')

    helper = LayerHelper('relu6', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='relu6',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={
            'threshold': threshold,
            # Honour the process-wide MKL-DNN switch so the oneDNN CPU
            # kernel can be selected when enabled.
            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"]
        })
    return out


@templatedoc()
def pow(x, factor=1.0, name=None):
    """
    This is Pow Activation Operator.

    :math:`out = x^{factor}`

    Args:
        x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
        factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``. The exponential factor of Pow. Default 1.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.

    Examples:

        ..
code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name="x", shape=[32,32], dtype="float32")

            # example 1: argument factor is float
            y_1 = fluid.layers.pow(x, factor=2.0)
            # y_1 is x^{2.0}

            # example 2: argument factor is Variable
            factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
            y_2 = fluid.layers.pow(x, factor=factor_tensor)
            # y_2 is x^{3.0}
    """
    check_variable_and_dtype(
        x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow')

    helper = LayerHelper('pow', **locals())
    inputs = {'X': x}
    attrs = {}
    if isinstance(factor, Variable):
        # A tensor exponent is fed as an op input ('FactorTensor'); gradients
        # must not flow back into the exponent itself.
        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
        factor.stop_gradient = True
        inputs['FactorTensor'] = factor
    else:
        # A plain python float is baked into the op as an attribute.
        attrs['factor'] = factor

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
    """
    stanh activation.

    .. math::

        out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        scale_a (float, optional): The scale factor a of the input. Default is 0.67.
        scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        A Tensor with the same data type and shape as ``x`` .

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = paddle.stanh(x, scale_a=0.67, scale_b=1.72) # [1.00616539, 1.49927628, 1.65933108, 1.70390463]
    """

    if in_dygraph_mode():
        # Eager fast path: call the C++ kernel directly.
        return _C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')

    helper = LayerHelper('stanh', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='stanh',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'scale_a': scale_a,
               'scale_b': scale_b})
    return out


@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
    """
    ${comment}
    Parameters:
        x (${x_type}): ${x_comment}
        slope (float, optional): ${slope_comment}
        offset (float, optional): ${offset_comment}
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        ${out_type}: ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
            result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
    """
    if in_dygraph_mode():
        # Eager fast path: call the C++ kernel directly.
        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_sigmoid')

    helper = LayerHelper('hard_sigmoid', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='hard_sigmoid',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': slope,
               'offset': offset})
    return out


@templatedoc()
def swish(x, beta=1.0, name=None):
    r"""
    :alias_main: paddle.nn.functional.swish
    :alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish
    :old_api: paddle.fluid.layers.swish

    Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.

    Equation:

    .. math::
        out = \\frac{x}{1 + e^{- beta * x}}

    Args:
        x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
        beta(float): Constant beta of swish operator, default 1.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.

    Examples:

        .. code-block:: python

            # declarative mode
            import numpy as np
            from paddle import fluid

            x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
            y = fluid.layers.swish(x, beta=2.0)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            start = fluid.default_startup_program()
            main = fluid.default_main_program()

            data = np.random.randn(2, 3).astype("float32")
            exe.run(start)
            y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])

            data
            # array([[-1.1239197 ,  1.3391294 ,  0.03921051],
            #        [ 1.1970421 ,  0.02440812,  1.2055548 ]], dtype=float32)
            y_np
            # array([[-0.2756806 ,  1.0610548 ,  0.01998957],
            #        [ 0.9193261 ,  0.01235299,  0.9276883 ]], dtype=float32)

        .. code-block:: python

            # imperative mode
            import numpy as np
            from paddle import fluid
            import paddle.fluid.dygraph as dg

            data = np.random.randn(2, 3).astype("float32")
            place = fluid.CPUPlace()
            with dg.guard(place) as g:
                x = dg.to_variable(data)
                y = fluid.layers.swish(x)
                y_np = y.numpy()
            data
            # array([[-0.0816701 ,  1.1603649 , -0.88325626],
            #        [ 0.7522361 ,  1.0978601 ,  0.12987892]], dtype=float32)
            y_np
            # array([[-0.03916847,  0.8835007 , -0.25835553],
            #        [ 0.51126915,  0.82324016,  0.06915068]], dtype=float32)
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')

    helper = LayerHelper('swish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='swish',
        inputs={'X': x},
        outputs={'Out': out},
        # NOTE(review): the attribute key is 'slope' while the python argument
        # is named ``beta`` (paddle.nn.functional.swish passes 'beta').
        # Verify against the swish op proto that the kernel actually reads
        # 'slope'; otherwise ``beta`` is silently ignored -- TODO confirm.
        attrs={'slope': beta})
    return out


@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
    r"""
    prelu activation.

    .. math::
        prelu(x) = max(0, x) + \alpha * min(0, x)

    There are three modes for the activation:

    .. code-block:: text

        all: All elements share same alpha.
        channel: Elements in same channel share same alpha.
        element: All elements do not share alpha. Each element has its own alpha.

    Parameters:
        x (Tensor): The input Tensor or LoDTensor with data type float32.
        mode (str): The mode for weight sharing.
        param_attr (ParamAttr|None, optional): The parameter attribute for the learnable \
            weight (alpha), it can be create by ParamAttr. None by default. \
            For detailed information, please refer to :ref:`api_fluid_ParamAttr`.
        name (str, optional): Name for the operation (optional, default is None). \
            For more information, please refer to :ref:`api_guide_Name`.
        data_format(str, optional): Data format that specifies the layout of input.
            It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".

    Returns:
        Tensor: A tensor with the same shape and data type as x.

    Examples:

        ..
code-block:: python import paddle x = paddle.to_tensor([-1., 2., 3.]) param = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.2)) out = paddle.static.nn.prelu(x, 'all', param) # [-0.2, 2., 3.] """ check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu') helper = LayerHelper('prelu', **locals()) if mode not in ['all', 'channel', 'element']: raise ValueError('mode should be one of all, channel, element.') alpha_shape = [1] if mode == 'channel': true_data_format = [ 'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC' ] if data_format not in true_data_format: raise ValueError( "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', " "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)) data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' assert len( x.shape ) >= 2, "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'" #NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]). # To be consistent with Prelu, it is simplified. #NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version. 
#NOTE(GuoxiaWang): support NHWC data format if data_format == 'NHWC': alpha_shape = [1, 1, 1, x.shape[-1]] else: alpha_shape = [1, x.shape[1], 1, 1] elif mode == 'element': assert len( x.shape ) >= 1, "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'" alpha_shape = [1] + list(x.shape)[1:] dtype = helper.input_dtype(input_param_name='x') alpha = helper.create_parameter( attr=helper.param_attr, shape=alpha_shape, dtype=dtype, is_bias=False, default_initializer=Constant(0.25)) out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="prelu", inputs={"X": x, 'Alpha': alpha}, attrs={"mode": mode, "data_format": data_format}, outputs={"Out": out}) return out @templatedoc() def brelu(x, t_min=0.0, t_max=24.0, name=None): """ ${comment} Args: x(${x_type}): ${x_comment} t_min(${t_min_type}|0.0): ${t_min_comment} t_max(${t_max_type}|24.0): ${t_max_comment} name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: ${out_type}: ${out_comment} Examples: .. code-block:: python import paddle.fluid as fluid import paddle import numpy as np paddle.enable_static() input_brelu = np.array([[-1,6],[1,15.6]]) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(input_brelu) y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0) print(y.numpy()) #[[ 1. 6.] #[ 1. 
            10.]]
    """
    # Imperative (dygraph) fast path: dispatch straight to the fused C++ op.
    if in_dygraph_mode():
        return _C_ops.brelu(x, 't_min', t_min, 't_max', t_max)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')

    # Static-graph path: allocate the output var and append a 'brelu' op that
    # clips x into [t_min, t_max] before applying the activation.
    helper = LayerHelper('brelu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='brelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'t_min': t_min,
               't_max': t_max})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        alpha(${alpha_type}|0.02): ${alpha_comment}
        name(str|None): The default value is None. Normally there is no need
            for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        output(${out_type}): ${out_comment}

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
            y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
            print(y) # [[-0.1, 2], [3, -0.4]]

    """
    # Deprecated thin wrapper: delegate to the paddle 2.0 functional API,
    # which handles both dygraph and static-graph modes itself.
    return paddle.nn.functional.leaky_relu(x, alpha, name)


def soft_relu(x, threshold=40.0, name=None):
    r"""
    SoftRelu Activation Operator.

    $out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$

    Args:
        x(Variable): Input of soft_relu operator. Data type can be float32, float64.
        threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable(Tensor|LoDTensor)): Output of soft_relu operator, shape and LoD same as input.

    Examples:

        ..
code-block:: python import paddle.fluid as fluid import numpy as np import numpy as np import paddle paddle.enable_static() inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32") output = fluid.layers.soft_relu(inputs, threshold=20.0) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) img = np.array([[0, 1],[2, 3]]).astype(np.float32) res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)] """ check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'soft_relu') helper = LayerHelper('soft_relu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='soft_relu', inputs={'X': x}, outputs={'Out': out}, attrs={'threshold': threshold}) return out def flatten(x, axis=1, name=None): r""" **Flatten op** Flatten the input tensor into a 2D matrix. For Example: .. code-block:: text Case 1: Given X.shape = (3, 100, 100, 4) and axis = 2 We get: Out.shape = (3 * 100, 4 * 100) Case 2: Given X.shape = (3, 100, 100, 4) and axis = 0 We get: Out.shape = (1, 3 * 100 * 100 * 4) Args: x (Variable): A tensor of rank >= axis. A tensor with type float32, float64, int8, int32, int64, uint8. axis (int): Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. Default: 1. name(str, Optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None. Returns: Variable: A 2D tensor with the contents of the input tensor, with input \ dimensions up to axis flattened to the outer dimension of \ the output and remaining input dimensions flattened into the \ inner dimension of the output. A Tensor with type same as input x. Raises: ValueError: If x is not a variable. 
ValueError: If axis is not in range [0, rank(x)]. Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32") # x shape is [4, 4, 3] out = fluid.layers.flatten(x=x, axis=2) # out shape is [16, 3] """ check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'], 'flatten') helper = LayerHelper('flatten', **locals()) if not (isinstance(x, Variable)): raise ValueError("The input x should be a Variable") if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0: raise ValueError("The axis should be a int, and in range [0, rank(x)]") out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) helper.append_op( type='flatten2', inputs={"X": x}, outputs={'Out': out, 'XShape': x_shape}, attrs={"axis": axis}) return out def stack(x, axis=0, name=None): """ This OP stacks all the inputs :code:`x` along axis. .. code-block:: text Case 1: Input: x[0].shape = [1, 2] x[0].data = [ [1.0 , 2.0 ] ] x[1].shape = [1, 2] x[1].data = [ [3.0 , 4.0 ] ] x[2].shape = [1, 2] x[2].data = [ [5.0 , 6.0 ] ] Attrs: axis = 0 Output: Out.dims = [3, 1, 2] Out.data =[ [ [1.0, 2.0] ], [ [3.0, 4.0] ], [ [5.0, 6.0] ] ] Case 2: Input: x[0].shape = [1, 2] x[0].data = [ [1.0 , 2.0 ] ] x[1].shape = [1, 2] x[1].data = [ [3.0 , 4.0 ] ] x[2].shape = [1, 2] x[2].data = [ [5.0 , 6.0 ] ] Attrs: axis = 1 or axis = -2 Output: Out.shape = [1, 3, 2] Out.data =[ [ [1.0, 2.0] [3.0, 4.0] [5.0, 6.0] ] ] Args: x (list(Variable)|tuple(Variable)): Input :code:`x` can be a :code:`list` or :code:`tuple` of Tensors, the shapes of all these Tensors must be the same. Supposing input is N dims Tensors :math:`[d_0, d_1, ..., d_{n-1}]`, the output is N+1 dims Tensor :math:`[d_0, d_1, d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`. Supported data types: float32, float64, int32, int64. 
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``, where ``R`` is the number of dimensions of the first input tensor ``x[0]``. If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0. name (str, optional): Please refer to :ref:`api_guide_Name`, Default None. Returns: Variable: The stacked Tensor, has same data type with input Tensors. Output dim is :math:`rank(x[0])+1`. Examples: .. code-block:: python import paddle.fluid as fluid import paddle.fluid.layers as layers # set batch size=None x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32') x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32') # stack Tensor list data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2] data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2] """ axis = 0 if axis is None else axis if in_dygraph_mode(): return _C_ops.stack(x, 'axis', axis) if not isinstance(x, list) and not isinstance(x, tuple): # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc. # In that case, Variable is array of tensors indeed. if isinstance(x, Variable) and x.desc.type( ) == core.VarDesc.VarType.LOD_TENSOR_ARRAY: x = [x] else: raise TypeError("The type of '%s' in %s must be %s, but received %s" % ('x', 'stack', 'list[Tensor], tuple[Tensor] or TensorArray', type(x))) helper = LayerHelper('stack', **locals()) out = helper.create_variable_for_type_inference(x[0].dtype) if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \ "number of the elements must be 1, but received %s." 
% len(x) out_index = helper.create_variable_for_type_inference(dtype="int32") for i in x: check_variable_and_dtype(i, 'x', \ ['float16', 'float32', 'float64', 'int32', 'int64'], 'stack') helper.append_op( type='tensor_array_to_tensor', inputs={'X': x[0]}, outputs={'Out': [out], 'OutIndex': [out_index]}, attrs={'axis': axis, 'use_stack': True}) else: helper.append_op( type='stack', inputs={'X': x}, outputs={'Y': out}, attrs={'axis': axis}) return out @templatedoc(op_type="filter_by_instag") def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0): """ **Filter By Instag Layer** This function filter a batch of ins by instag, There are multiple ins, and every ins belongs to some tags. We can specify some tags we want. So the ins which belongs to that tags remains in the output, and others removed. For example, one batch has 4 ins. Every ins has its tag list. | Ins | Ins_Tag | |:-----:|:------:| | 0 | 0, 1 | | 1 | 1, 3 | | 2 | 0, 3 | | 3 | 2, 6 | And Lod is [1,1,1,1] And the filter tags [1] From the definition above, ins which has tag 1 can pass the filter So Ins 0 and Ins 1 can pass and be seen in the output, Ins 2 and 3 cannot pass because they do not has tag 1. Actually, if is_lod is false, it is normal tensor that equals to lod_tensor with all 1, similar to the example above. Args: ins (Variable): Input Variable (LoDTensor), usually it is 2D tensor And first dimension can have lod info or not. ins_tag (Variable): Input Variable (LoDTensor), usually it is 1D list And split them by lod info filter_tag (Variable): Input Variable (1D Tensor/List), usually it is list that holds the tags. is_lod (Bool): Boolean value to indicate ins is lod tensor or not. out_val_if_empty(Int64): If the output after filter is empty, this value will be set to Output tensor. Returns: Variable: filtered ins (LoDTensor) and loss weight (Tensor) Examples: .. 
code-block:: python import paddle.fluid.layers as layers ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64') ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64') filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64') out, loss_weight = layers.filter_by_instag(ins, ins_tag, filter_tag, True) """ helper = LayerHelper('filter_by_instag', **locals()) out = helper.create_variable_for_type_inference(dtype=ins.dtype) loss_weight = helper.create_variable_for_type_inference(dtype=np.float64) mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype) helper.append_op( type='filter_by_instag', inputs={'Ins': ins, 'Ins_tag': ins_tag, 'Filter_tag': filter_tag}, outputs={'Out': out, 'LossWeight': loss_weight, 'IndexMap': mmap}, attrs={'is_lod': is_lod, 'out_val_if_empty': out_val_if_empty}) return [out, loss_weight] def unstack(x, axis=0, num=None): """ :alias_main: paddle.unstack :alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack :old_api: paddle.fluid.layers.unstack **UnStack Layer** This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`. If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`. If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`, and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is raised. Args: x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64. axis (int): The axis along which the input is unstacked. num (int|None): The number of output variables. Returns: list(Tensor): The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64. Raises: ValueError: If x.shape[axis] <= 0 or axis is not in range [-D, D). Examples: .. 
code-block:: python import paddle x = paddle.ones(name='x', shape=[2, 3, 5], dtype='float32') # create a tensor with shape=[2, 3, 5] y = paddle.unstack(x, axis=1) # unstack with second axis, which results 3 tensors with shape=[2, 5] """ if in_dygraph_mode(): if num == None: num = x.shape[axis] if num == 0: return [] return _C_ops.unstack(x, num, 'axis', int(axis), 'num', num) helper = LayerHelper('unstack', **locals()) if num is None: if axis is None or x.shape[axis] <= 0: raise ValueError('unknown unstack number') else: num = x.shape[axis] outs = [] for _ in range(num): outs.append(helper.create_variable_for_type_inference(x.dtype)) helper.append_op( type='unstack', inputs={'X': [x]}, outputs={'Y': outs}, attrs={'axis': axis, 'num': num}) return outs @deprecated(since='2.0.0', update_to="paddle.expand") def expand(x, expand_times, name=None): """ :alias_main: paddle.expand :alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand :old_api: paddle.fluid.layers.expand This operation tiles ``x`` multiple times according to the parameter ``expand_times``. The times number for each dimension of ``x`` is set by the parameter ``expand_times``. The rank of ``x`` should be less than or equal to 6. Please note that size of ``expand_times`` must be the same with X's rank. Following is a using case: .. code-block:: text Input(X) is a 3-D tensor with shape [2, 3, 1]: [ [[1], [2], [3]], [[4], [5], [6]] ] Attr(expand_times): [1, 2, 2] Output(Out) is a 3-D tensor with shape [2, 6, 2]: [ [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] ] Args: x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` . expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``expand_times`` is an Variable, it should be an 1-D Tensor. 
Expand times number for each dimension of ``x`` . name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` . Raises: TypeError: The type of ``expand_times`` must be list, tuple or Variable. ValueError: The elements of ``expand_times`` cannot be negative. Examples: .. code-block:: python import paddle.fluid as fluid # example 1: data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0) expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2]) # the shape of expanded_1 is [2, 6, 2]. # example 2: data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3) expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4) expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times) # the shape of expanded_2 is [48, 56]. 
""" if in_dygraph_mode(): attrs = () expand_times_tensor = None if isinstance(expand_times, (list, tuple)): expand_times = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in expand_times ] attrs += ('expand_times', expand_times) elif isinstance(expand_times, Variable): expand_times_tensor = expand_times expand_times_tensor.stop_gradient = True return _C_ops.expand(x, expand_times_tensor, *attrs) inputs = {"X": [x]} attrs = {} check_variable_and_dtype( x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'expand') check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand') if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True: raise ValueError( "expand op bool date type must set the stop_gradient to be False") helper = LayerHelper('expand', input=x, **locals()) def get_attr_expand_times(list_expand_times): attrs_expand_times = [] for idx, times in enumerate(list_expand_times): if isinstance(times, Variable): attrs_expand_times.append(-1) else: attrs_expand_times.append(times) assert times > 0, ( "Each element given in expand_times must not be negative.") return attrs_expand_times if isinstance(expand_times, Variable): expand_times.stop_gradient = True inputs['ExpandTimes'] = expand_times elif isinstance(expand_times, (list, tuple)): attrs['expand_times'] = get_attr_expand_times(expand_times) if utils._contain_var(expand_times): inputs['expand_times_tensor'] = utils._convert_to_tensor_list( expand_times) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out @deprecated(since='2.0.0', update_to="paddle.expand_as") def expand_as(x, target_tensor, name=None): """ :alias_main: paddle.expand_as :alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as :old_api: paddle.fluid.layers.expand_as expand_as operator tiles to the input by given 
expand tensor. You should set expand tensor for each dimension by providing tensor 'target_tensor'. The rank of X should be in [1, 6]. Please note that size of 'target_tensor' must be the same with X's rank. Following is a using case: .. code-block:: text Input(X) is a 3-D tensor with shape [2, 3, 1]: [ [[1], [2], [3]], [[4], [5], [6]] ] target_tensor's shape: [2, 6, 2] Output(Out) is a 3-D tensor with shape [2, 6, 2]: [ [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] ] Args: x (Variable): A Tensor with dtype float64, float32, int32. A tensor with rank in [1, 6]. target_tensor (Variable): A Tensor with dtype float64, float32, int32. target_tensor for expanding to Input(X). Only use target_tensor'shape. Returns: Variable: A Tensor with dtype float64, float32, int32. After expanding, size of each dimension of Output(Out) is equal to the size of the corresponding dimension of target_tensor multiplying the corresponding value given by target_tensor. Examples: .. 
code-block:: python import paddle import paddle.fluid as fluid import numpy as np paddle.enable_static() data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64') target_tensor = fluid.layers.data( name="target_tensor", shape=[-1,20], dtype='float64') result = fluid.layers.expand_as(x=data, target_tensor=target_tensor) use_cuda = False place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) x = np.random.rand(3,10) y = np.random.rand(3,20) output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name]) print(output[0].shape) #(3,20) """ if in_dygraph_mode(): return _C_ops.expand_as(x, target_tensor) check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as') check_variable_and_dtype(target_tensor, 'target_tensor', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as') helper = LayerHelper('expand_as', input=x, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) inputs = {'X': x, 'target_tensor': target_tensor} helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out}) return out from paddle.fluid.framework import convert_np_dtype_to_dtype_ @deprecated(since='1.8.0', update_to="paddle.uniform") @templatedoc() def uniform_random_batch_size_like(input, shape, dtype='float32', input_dim_idx=0, output_dim_idx=0, min=-1.0, max=1.0, seed=0): """ This OP initializes a variable with random values sampled from a uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension. .. 
code-block:: text *Case 1: Given: input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] shape=[2,4] result.shape[output_dim_idx] = input.shape[input_dim_idx], output_dim_idx = 0, input_dim_idx = 0, result.shape[0] = input.shape[0], then: result=[[ 0.3443427 , -0.23056602, 0.3477049 , 0.06139076]] # result.shape=[1,4] *Case 2: Given: input =[[0.946741 , 0.1357001 , 0.38086128]] # input.shape=[1,3] shape=[2,4] input_dim_idx=1 output_dim_idx=1 result.shape[output_dim_idx] = input.shape[input_dim_idx], output_dim_idx = 1, input_dim_idx = 1, result.shape[1] = input.shape[1], then: result=[[-0.23133647, -0.84195036, 0.21441269], [-0.08774924, 0.25605237, -0.09403259]] # result.shape=[2,3] Args: input (Variable): A Tensor. Supported data types: float32, float64. shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int. input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default 0. output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0. min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0. max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0. seed (int, optional): Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time. dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32. Returns: Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor. Examples: .. 
code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() # example 1: input = fluid.data(name="input", shape=[1, 3], dtype='float32') out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4] # example 2: out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3] """ check_variable_and_dtype(input, 'Input', ("float32", 'float64', "uint16"), 'uniform_random_batch_size_like') check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like') check_dtype(dtype, 'dtype', ('float32', 'float64', "uint16"), 'uniform_random_batch_size_like') helper = LayerHelper('uniform_random_batch_size_like', **locals()) out = helper.create_variable_for_type_inference(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype) helper.append_op( type='uniform_random_batch_size_like', inputs={'Input': input}, outputs={'Out': out}, attrs={ 'shape': shape, 'input_dim_idx': input_dim_idx, 'output_dim_idx': output_dim_idx, 'min': min, 'max': max, 'seed': seed, 'dtype': c_dtype }) return out @deprecated(since="2.0.0", update_to="paddle.normal") @templatedoc() def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32', name=None): """ This OP returns a Tensor filled with random values sampled from a Gaussian distribution, with ``shape`` and ``dtype``. Args: shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape`` is a list or tuple, the elements of it should be integers or Tensors (with the shape [1], and the data type int32 or int64). If ``shape`` is a Tensor, it should be a 1-D Tensor(with the data type int32 or int64). mean(float|int, optional): Mean of the output tensor, default is 0.0. std(float|int, optional): Standard deviation of the output tensor, default is 1.0. seed(int, optional): ${seed_comment} dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the output Tensor. Supported data types: float32, float64. 
Default is float32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A Tensor filled with random values sampled from a Gaussian distribution, with ``shape`` and ``dtype``. Examples: .. code-block:: python import paddle import paddle.fluid as fluid paddle.enable_static() # example 1: # attr shape is a list which doesn't contain Tensor. result_1 = fluid.layers.gaussian_random(shape=[3, 4]) # [[-0.31261674, 1.8736548, -0.6274357, 0.96988016], # [-0.12294637, 0.9554768, 1.5690808, -1.2894802 ], # [-0.60082096, -0.61138713, 1.5345167, -0.21834975]] # example 2: # attr shape is a list which contains Tensor. dim_1 = fluid.layers.fill_constant([1], "int64", 2) dim_2 = fluid.layers.fill_constant([1], "int32", 3) result_2 = fluid.layers.gaussian_random(shape=[dim_1, dim_2]) # [[ 0.51398206, -0.3389769, 0.23597084], # [ 1.0388143, -1.2015356, -1.0499583 ]] # example 3: # attr shape is a Tensor, the data type must be int64 or int32. var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64") result_3 = fluid.layers.gaussian_random(var_shape) # if var_shape's value is [2, 3] # result_3 is: # [[-0.12310527, 0.8187662, 1.923219 ] # [ 0.70721835, 0.5210541, -0.03214082]] .. code-block:: python # declarative mode # required: skiptest import numpy as np from paddle import fluid x = fluid.layers.gaussian_random((2, 3), std=2., seed=10) place = fluid.CPUPlace() exe = fluid.Executor(place) start = fluid.default_startup_program() main = fluid.default_main_program() exe.run(start) x_np, = exe.run(main, feed={}, fetch_list=[x]) x_np # array([[2.3060477, 2.676496 , 3.9911983], # [0.9990833, 2.8675377, 2.2279181]], dtype=float32) .. 
code-block:: python # imperative mode import numpy as np from paddle import fluid import paddle.fluid.dygraph as dg place = fluid.CPUPlace() with dg.guard(place) as g: x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10) x_np = x.numpy() x_np # array([[2.3060477 , 2.676496 , 3.9911983 , 0.9990833 ], # [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32) """ if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) return _C_ops.gaussian_random('shape', shape, 'mean', float(mean), 'std', float(std), 'seed', seed, 'dtype', dtype) check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn') check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn') inputs = {} attrs = { 'mean': mean, 'std': std, 'seed': seed, 'dtype': dtype, 'use_mkldnn': False } utils.get_shape_tensor_inputs( inputs=inputs, attrs=attrs, shape=shape, op_type='gaussian_random/randn') helper = LayerHelper('gaussian_random', **locals()) out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='gaussian_random', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out @templatedoc() def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'): """ This op is used for sampling id from multinomial distribution from the input, sampling one id for one sample. Parameters: x (Variable): 2-D tensor, [batch_size, input_feature_dimensions] min (Float): minimum , default 0.0. max (Float): maximum, default 1.0. seed (Float): Random seed, default 0. if seed is not 0, will generate same number every time. dtype(np.dtype|core.VarDesc.VarType|str): The type of output data : float32, float_16, int etc Returns: Variable: sampling tensor. Examples: .. 
        code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(
                name="X",
                shape=[13, 11],
                dtype='float32')

            out = fluid.layers.sampling_id(x)
    """
    # Static-graph construction: append a 'sampling_id' op that draws one id
    # per row of x; min/max/seed are forwarded as op attributes.
    helper = LayerHelper('sampling_id', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='sampling_id',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'min': min,
               'max': max,
               'seed': seed})

    return out


@deprecated(since='1.8.0', update_to="paddle.normal")
@templatedoc()
def gaussian_random_batch_size_like(input,
                                    shape,
                                    input_dim_idx=0,
                                    output_dim_idx=0,
                                    mean=0.0,
                                    std=1.0,
                                    seed=0,
                                    dtype='float32'):
    """
    ${comment}

    Args:
        input (Variable): ${input_comment}
        shape (tuple|list): ${shape_comment}
        input_dim_idx (int): ${input_dim_idx_comment}
        output_dim_idx (int): ${output_dim_idx_comment}
        mean (float): ${mean_comment}
        std (float): ${std_comment}
        seed (int): ${seed_comment}
        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data, float32 or float_64.

    Returns:
        out (Variable): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            input = fluid.data(name="input", shape=[13, 11], dtype='float32')

            out = fluid.layers.gaussian_random_batch_size_like(
                input, shape=[-1, 11], mean=1.0, std=2.0)

    """
    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
    # Validate argument kinds/dtypes up front; dtype is converted from its
    # numpy/str form to the framework's VarType enum before op creation.
    check_type(input, 'input', (Variable),
               'fluid.layers.gaussian_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple),
               'fluid.layers.gaussian_random_batch_size_like')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
                'fluid.layers.gaussian_random_batch_size_like')

    out = helper.create_variable_for_type_inference(dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    # The output shape copies input.shape[input_dim_idx] into
    # shape[output_dim_idx], so the batch dim follows the input at runtime.
    helper.append_op(
        type='gaussian_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': out},
        attrs={
            'shape': shape,
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'mean': mean,
            'std': std,
            'seed': seed,
            'dtype': c_dtype
        })

    return out


@templatedoc()
def sum(x):
    """
${comment} Case 1: :: Input: Input. Shape = [2, 3] Input = [[1, 2, 3], [4, 5, 6]] Output: The output. Shape = [2, 3] Output = [[1, 2, 3], [4, 5, 6]] Case 2: :: Input: First input: Input1. Shape = [2, 3] Input1 = [[1, 2, 3], [4, 5, 6]] The second input: Input2. Shape = [2, 3] Input2 = [[7, 8, 9], [10, 11, 12]] Output: The output. Shape = [2, 3] Output = [[8, 10, 12], [14, 16, 18]] Args: x (Variable|list(Variable)): ${x_comment} Returns: Variable: ${out_comment} Examples: .. code-block:: python import paddle.fluid as fluid input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5) input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3) sum = fluid.layers.sum([input0, input1]) # You can print out 'sum' via executor. out = fluid.layers.Print(sum, message="the sum of input0 and input1: ") exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_main_program()) # The printed result is: # 1570701754 the sum of input0 and input1: The place is:CPUPlace # Tensor[sum_0.tmp_0] # shape: [2,3,] # dtype: l # data: 8,8,8,8,8,8, # the sum of input0 and input1 is 2-D Tensor with shape [2,3]. # dtype is the corresponding C++ data type, which may vary in different environments. # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t, # so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux, # and '__int64' on Windows. They both represent 64-bit integer variables. """ return paddle.add_n(x) @templatedoc() def slice(input, axes, starts, ends): """ This operator produces a slice of ``input`` along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and end dimension for each axis in the list of axes and Slice uses this information to slice the input data tensor. 
If a negative value is passed to ``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the axis :math:`i-1` (here 0 is the initial position). If the value passed to ``starts`` or ``ends`` is greater than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` and ``ends``. Following examples will explain how slice works: .. code-block:: text Case1: Given: data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] axes = [0, 1] starts = [1, 0] ends = [2, 3] Then: result = [ [5, 6, 7], ] Case2: Given: data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] axes = [0, 1] starts = [0, 1] ends = [-1, 1000] # -1 denotes the reverse 0th position of dimension 0. Then: result = [ [2, 3, 4], ] # result = data[0:1, 1:4] Args: input (Tensor): A ``Tensor`` . The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``. axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to . starts (list|tuple|Tensor): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``starts`` is an Tensor, it should be an 1-D Tensor. It represents starting indices of corresponding axis in ``axes``. ends (list|tuple|Tensor): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``ends`` is an Tensor, it should be an 1-D Tensor . It represents ending indices of corresponding axis in ``axes``. Returns: Tensor: A ``Tensor``. The data type is same as ``input``. Raises: TypeError: The type of ``starts`` must be list, tuple or Tensor. TypeError: The type of ``ends`` must be list, tuple or Tensor. Examples: .. code-block:: python import paddle input = paddle.rand(shape=[4, 5, 6], dtype='float32') # example 1: # attr starts is a list which doesn't contain tensor. 
axes = [0, 1, 2] starts = [-3, 0, 2] ends = [3, 2, 4] sliced_1 = paddle.slice(input, axes=axes, starts=starts, ends=ends) # sliced_1 is input[0:3, 0:2, 2:4]. # example 2: # attr starts is a list which contain tensor. minus_3 = paddle.full([1], -3, "int32") sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends) # sliced_2 is input[0:3, 0:2, 2:4]. """ if in_dygraph_mode(): attrs = () starts_tensor = None ends_tensor = None if isinstance(axes, (list, tuple)): axes = list(axes) if len(axes) == 0: raise ValueError( "Input axes should not be an empty list/tuple.") for i in range(len(axes)): if axes[i] < 0: axes[i] = max(0, axes[i] + len(input.shape)) else: axes[i] = min(len(input.shape) - 1, axes[i]) else: raise ValueError( "Input axes must be a python list or tuple, but reveived {}". format(type(axes))) infer_flags = list(1 for i in range(len(axes))) if isinstance(starts, (list, tuple)): starts = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in starts ] attrs += ('starts', starts) elif isinstance(starts, Variable): starts_tensor = starts starts.stop_gradient = True infer_flags = list(-1 for i in range(len(axes))) if isinstance(ends, (list, tuple)): ends = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in ends ] attrs += ('ends', ends) elif isinstance(ends, Variable): ends_tensor = ends ends_tensor.stop_gradient = True infer_flags = list(-1 for i in range(len(axes))) return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes, 'infer_flags', infer_flags, *attrs) if not isinstance(starts, (list, tuple, Variable)): raise ValueError( "Input starts must be an Variable, python list or tuple.") if not isinstance(ends, (list, tuple, Variable)): raise ValueError( "Input ends must be an Variable, python list or tuple.") helper = LayerHelper('slice', **locals()) inputs = {'Input': input} attrs = {'axes': axes} infer_flags = list(1 for i in range(len(axes))) # starts if isinstance(starts, Variable): 
starts.stop_gradient = True inputs['StartsTensor'] = starts infer_flags = list(-1 for i in range(len(axes))) elif isinstance(starts, (list, tuple)): attrs['starts'] = [] if utils._contain_var(starts): inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts) for i, dim in enumerate(starts): if isinstance(dim, Variable): attrs['starts'].append(-1) infer_flags[i] = -1 else: attrs['starts'].append(dim) else: attrs['starts'] = starts # ends if isinstance(ends, Variable): ends.stop_gradient = True inputs['EndsTensor'] = ends infer_flags = list(-1 for i in range(len(axes))) elif isinstance(ends, (list, tuple)): attrs['ends'] = [] if utils._contain_var(ends): inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends) for i, dim in enumerate(ends): if isinstance(dim, Variable): attrs['ends'].append(-1) infer_flags[i] = -1 else: attrs['ends'].append(dim) else: attrs['ends'] = ends # infer_flags attrs['infer_flags'] = infer_flags out = helper.create_variable_for_type_inference( dtype=helper.input_dtype('input')) helper.append_op( type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out}) return out @deprecated(since='2.0.0', update_to="paddle.strided_slice") def strided_slice(input, axes, starts, ends, strides): """ :alias_main: paddle.strided_slice :alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice :old_api: paddle.fluid.layers.strided_slice This operator produces a slice of ``input`` along multiple axes. Similar to numpy: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html Slice uses ``axes``, ``starts`` and ``ends`` attributes to specify the start and end dimension for each axis in the list of axes and Slice uses this information to slice the input data tensor. If a negative value is passed to ``starts`` or ``ends`` such as :math:`-i`, it represents the reverse position of the axis :math:`i-1` th(here 0 is the initial position). 
The ``strides`` represents steps of slicing and if the ``strides`` is negative, slice operation is in the opposite direction. If the value passed to ``starts`` or ``ends`` is greater than n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. The size of ``axes`` must be equal to ``starts`` , ``ends`` and ``strides``. Following examples will explain how strided_slice works: .. code-block:: text Case1: Given: data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] axes = [0, 1] starts = [1, 0] ends = [2, 3] strides = [1, 1] Then: result = [ [5, 6, 7], ] Case2: Given: data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] axes = [0, 1] starts = [0, 1] ends = [2, 0] strides = [1, -1] Then: result = [ [8, 7, 6], ] Case3: Given: data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] axes = [0, 1] starts = [0, 1] ends = [-1, 1000] strides = [1, 3] Then: result = [ [2], ] Args: input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``bool``, ``float32``, ``float64``, ``int32`` or ``int64``. axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to. It's optional. If it is not provides, it will be treated as :math:`[0,1,...,len(starts)-1]`. starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``starts`` is an Variable, it should be an 1-D Tensor. It represents starting indices of corresponding axis in ``axes``. ends (list|tuple|Variable): The data type is ``int32`` . If ``ends`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``ends`` is an Variable, it should be an 1-D Tensor . It represents ending indices of corresponding axis in ``axes``. strides (list|tuple|Variable): The data type is ``int32`` . If ``strides`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. 
If ``strides`` is an Variable, it should be an 1-D Tensor . It represents slice step of corresponding axis in ``axes``. Returns: Variable: A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is same as ``input``. Raises: TypeError: The type of ``starts`` must be list, tuple or Variable. TypeError: The type of ``ends`` must be list, tuple or Variable. TypeError: The type of ``strides`` must be list, tuple or Variable. Examples: .. code-block:: python import paddle.fluid as fluid import paddle paddle.enable_static() input = fluid.data( name="input", shape=[3, 4, 5, 6], dtype='float32') # example 1: # attr starts is a list which doesn't contain tensor Variable. axes = [0, 1, 2] starts = [-3, 0, 2] ends = [3, 2, 4] strides_1 = [1, 1, 1] strides_2 = [1, 1, 2] sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1) # sliced_1 is input[:, 0:3:1, 0:2:1, 2:4:1]. # example 2: # attr starts is a list which contain tensor Variable. minus_3 = fluid.layers.fill_constant([1], "int32", -3) sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2) # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2]. 
""" helper = LayerHelper('strided_slice', **locals()) check_variable_and_dtype(input, 'input', ['bool', 'float32', 'float64', 'int32', 'int64'], 'strided_slice') check_type(axes, 'axes', (list, tuple), 'strided_slice') check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice') check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice') check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice') def check_list_elements_dtype(list_input, input_name): if isinstance(list_input, Variable): check_dtype(list_input.dtype, input_name, ['int32'], 'strided_slice') else: for i, var in enumerate(list_input): var_name = input_name + '[' + str(i) + ']' if isinstance(var, Variable): check_dtype(var.dtype, var_name, ['int32'], 'strided_slice') check_list_elements_dtype(axes, 'axes') check_list_elements_dtype(starts, 'starts') check_list_elements_dtype(ends, 'ends') check_list_elements_dtype(strides, 'strides') def get_new_list_tensor(old_list): new_list_tensor = [] for dim in old_list: if isinstance(dim, Variable): dim.stop_gradient = True new_list_tensor.append(dim) else: assert (isinstance(dim, int)) temp_out = helper.create_variable_for_type_inference('int32') fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out) new_list_tensor.append(temp_out) return new_list_tensor inputs = {'Input': input} attrs = {'axes': axes} infer_flags = list(1 for i in range(len(axes))) if in_dygraph_mode(): inputs = {'Input': input} attrs = { 'axes': axes, 'starts': starts, 'ends': ends, 'strides': strides, 'infer_flags': infer_flags } else: # starts if isinstance(starts, Variable): starts.stop_gradient = True inputs['StartsTensor'] = starts elif isinstance(starts, (list, tuple)): attrs['starts'] = [] if utils._contain_var(starts): inputs['StartsTensorList'] = get_new_list_tensor(starts) for i, dim in enumerate(starts): if isinstance(dim, Variable): attrs['starts'].append(-1) infer_flags[i] = -1 else: attrs['starts'].append(dim) else: attrs['starts'] = starts # 
ends if isinstance(ends, Variable): ends.stop_gradient = True inputs['EndsTensor'] = ends elif isinstance(ends, (list, tuple)): attrs['ends'] = [] if utils._contain_var(ends): inputs['EndsTensorList'] = get_new_list_tensor(ends) for i, dim in enumerate(ends): if isinstance(dim, Variable): attrs['ends'].append(-1) infer_flags[i] = -1 else: attrs['ends'].append(dim) else: attrs['ends'] = ends # strides if isinstance(strides, Variable): strides.stop_gradient = True inputs['StridesTensor'] = strides elif isinstance(strides, (list, tuple)): attrs['strides'] = [] if utils._contain_var(strides): inputs['StridesTensorList'] = get_new_list_tensor(strides) for i, dim in enumerate(strides): if isinstance(dim, Variable): attrs['strides'].append(-1) infer_flags[i] = -1 else: attrs['strides'].append(dim) else: attrs['strides'] = strides attrs['infer_flags'] = infer_flags out = helper.create_variable_for_type_inference( dtype=helper.input_dtype('input')) helper.append_op( type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out}) return out def shape(input): """ :alias_main: paddle.shape :alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape :old_api: paddle.fluid.layers.shape **Shape Layer** Get the shape of the input. .. code-block:: text Case1: Given N-D Tensor: input = [ [1, 2, 3, 4], [5, 6, 7, 8] ] Then: input.shape = [2, 4] Case2: Given SelectedRows: input.rows = [0, 4, 19] input.height = 20 input.value = [ [1, 2], [3, 4], [5, 6] ] # inner tensor Then: input.shape = [3, 2] Args: input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64. If input variable is type of SelectedRows, returns the shape of it's inner tensor. Returns: Variable (Tensor): The shape of the input variable. Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle paddle.enable_static() inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32") output = fluid.layers.shape(inputs) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) img = np.ones((3, 100, 100)).astype(np.float32) res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) # [array([ 3, 100, 100], dtype=int32)] """ if in_dygraph_mode(): out = _C_ops.shape(input) out.stop_gradient = True return out check_variable_and_dtype(input, 'input', [ 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', 'complex128' ], 'shape') helper = LayerHelper('shape', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='shape', inputs={'Input': input}, outputs={'Out': out}, stop_gradient=True) return out def rank(input): """ The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor. Args: input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary. Returns: Tensor, the output data type is int32.: The 0-D tensor with the dimensions of the input Tensor. Examples: .. code-block:: python import paddle input = paddle.rand((3, 100, 100)) rank = paddle.rank(input) print(rank) # 3 """ check_type(input, 'input', (Variable), 'input') ndims = len(input.shape) out = assign(np.array(ndims, 'int32')) return out @deprecated(since="2.0.0", update_to="paddle.numel") def size(input): """ **Size Layer** Returns the number of elements for a tensor, which is a int64 Tensor with shape [1]. Args: input (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64. Returns: Tensor: The number of elements for the input Tensor. Raises: TypeError: ``input`` must be a Tensor and the data type of ``input`` must be one of bool, float16, float32, float64, int32, int64. Examples: .. 
code-block:: python import paddle import paddle.fluid.layers as layers paddle.enable_static() input = layers.data( name="input", shape=[3, 100], dtype="float32", append_batch_size=False) rank = layers.size(input) # 300 """ if in_dygraph_mode(): return _C_ops.size(input) check_variable_and_dtype( input, 'input', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size") helper = LayerHelper('size', **locals()) out = helper.create_variable_for_type_inference(dtype='int64') helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out}) return out def _elementwise_op(helper): op_type = helper.layer_type x = helper.kwargs.get('x', None) y = helper.kwargs.get('y', None) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype( x, 'x', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype( y, 'y', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'], op_type) axis = helper.kwargs.get('axis', -1) use_mkldnn = helper.kwargs.get('use_mkldnn', False) name = helper.kwargs.get('name', None) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type=op_type, inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'axis': axis, 'use_mkldnn': use_mkldnn}) return helper.append_activation(out) def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): """ Scale operator. Putting scale and bias to the input Tensor as following: ``bias_after_scale`` is True: .. math:: Out=scale*X+bias ``bias_after_scale`` is False: .. math:: Out=scale*(X+bias) Args: x(Tensor): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8. scale(float|Tensor): The scale factor of the input, it should be a float number or a Tensor with shape [1] and data type as float32. bias(float): The bias to be put on the input. 
bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances. act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: Output tensor of scale operator, with shape and data type same as input. Examples: .. code-block:: python # scale as a float32 number import paddle data = paddle.randn(shape=[2,3], dtype='float32') res = paddle.scale(data, scale=2.0, bias=1.0) .. code-block:: python # scale with parameter scale as a Tensor import paddle data = paddle.randn(shape=[2, 3], dtype='float32') factor = paddle.to_tensor([2], dtype='float32') res = paddle.scale(data, scale=factor, bias=1.0) """ if in_dygraph_mode(): _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale out = _C_ops.scale(x, 'scale', float(_scale), 'bias', float(bias), 'bias_after_scale', bias_after_scale) return dygraph_utils._append_activation_in_dygraph(out) check_variable_and_dtype(x, "x", [ 'float16', 'uint16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8' ], "scale") inputs = {'X': [x]} attrs = { 'bias': float(bias), 'bias_after_scale': bias_after_scale, } if isinstance(scale, Variable): inputs['ScaleTensor'] = [scale] else: attrs['scale'] = float(scale) helper = LayerHelper('scale', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs) return helper.append_activation(out) def elementwise_add(x, y, axis=-1, act=None, name=None): """ Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), "y": np.array([1, 5, 2]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='float32') y = fluid.data(name="y", shape=[3], dtype='float32') z = fluid.layers.elementwise_add(x, y) # z = x + y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # [3., 8., 6.] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.ones((2, 3, 4, 5)).astype('float32'), "y": np.zeros((3, 4)).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[3,4], dtype='float32') z = fluid.layers.elementwise_add(x, y, axis=1) # z = x + y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), "y": np.random.randint(1, 5, size=[5]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[5], dtype='float32') z = fluid.layers.elementwise_add(x, y, axis=3) # z = x + y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_add', use_mkldnn=_global_flags()["FLAGS_use_mkldnn"]) return _elementwise_op(LayerHelper('elementwise_add', **locals())) @deprecated(since="2.0.0", update_to="paddle.divide") def elementwise_div(x, y, axis=-1, act=None, name=None): """ Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), "y": np.array([1, 5, 2]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='float32') y = fluid.data(name="y", shape=[3], dtype='float32') z = fluid.layers.elementwise_div(x, y) # z = x / y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # [2., 0.6, 2.] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.ones((2, 3, 4, 5)).astype('float32'), "y": np.zeros((3, 4)).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[3,4], dtype='float32') z = fluid.layers.elementwise_div(x, y, axis=1) # z = x / y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), "y": np.random.randint(1, 5, size=[5]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[5], dtype='float32') z = fluid.layers.elementwise_div(x, y, axis=3) # z = x / y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_div') return _elementwise_op(LayerHelper('elementwise_div', **locals())) def elementwise_sub(x, y, axis=-1, act=None, name=None): """ Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), "y": np.array([1, 5, 2]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='float32') y = fluid.data(name="y", shape=[3], dtype='float32') z = fluid.layers.elementwise_sub(x, y) # z = x - y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # [1., -2., 2.] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.ones((2, 3, 4, 5)).astype('float32'), "y": np.zeros((3, 4)).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[3,4], dtype='float32') z = fluid.layers.elementwise_sub(x, y, axis=1) # z = x - y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), "y": np.random.randint(1, 5, size=[5]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[5], dtype='float32') z = fluid.layers.elementwise_sub(x, y, axis=3) # z = x - y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_sub') return _elementwise_op(LayerHelper('elementwise_sub', **locals())) @deprecated(since="2.0.0", update_to="paddle.multiply") def elementwise_mul(x, y, axis=-1, act=None, name=None): """ Examples: .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), "y": np.array([1, 5, 2]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='float32') y = fluid.data(name="y", shape=[3], dtype='float32') z = fluid.layers.elementwise_mul(x, y) # z = x * y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # [2., 15., 8.] .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.ones((2, 3, 4, 5)).astype('float32'), "y": np.zeros((3, 4)).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[3,4], dtype='float32') z = fluid.layers.elementwise_mul(x, y, axis=1) # z = x * y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), "y": np.random.randint(1, 5, size=[5]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[5], dtype='float32') z = fluid.layers.elementwise_mul(x, y, axis=3) # z = x * y place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) # z.shape=[2,3,4,5] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_mul') return _elementwise_op(LayerHelper('elementwise_mul', **locals())) def elementwise_max(x, y, axis=-1, act=None, name=None): """ :alias_main: paddle.elementwise_max :alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max :old_api: paddle.fluid.layers.elementwise_max Examples: .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), "y": np.array([1, 5, 2]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='float32') y = fluid.data(name="y", shape=[3], dtype='float32') z = fluid.layers.elementwise_max(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) #[2, 5, 4] .. 
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.ones((2, 3, 4, 5)).astype('float32'), "y": np.zeros((3, 4)).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32') y = fluid.data(name="y", shape=[3,4], dtype='float32') z = fluid.layers.elementwise_max(x, y, axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_max') return _elementwise_op(LayerHelper('elementwise_max', **locals())) def elementwise_min(x, y, axis=-1, act=None, name=None): """ :alias_main: paddle.elementwise_min :alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min :old_api: paddle.fluid.layers.elementwise_min Examples: .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([2, 3, 4]).astype('float32'), "y": np.array([1, 5, 2]).astype('float32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='float32') y = fluid.data(name="y", shape=[3], dtype='float32') z = fluid.layers.elementwise_min(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) #[1, 3, 2] .. 
code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle

            def gen_data():
                return {
                    "x": np.ones((2, 3, 4, 5)).astype('float32'),
                    "y": np.zeros((3, 4)).astype('float32')
                }

            paddle.enable_static()
            x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
            y = fluid.data(name="y", shape=[3,4], dtype='float32')
            z = fluid.layers.elementwise_min(x, y, axis=1)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])

            print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
    """
    # Eager mode: dispatch straight to the fused dygraph elementwise helper.
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_min')

    # Static graph: the shared builder appends the op from this frame's locals.
    return _elementwise_op(LayerHelper('elementwise_min', **locals()))


def elementwise_pow(x, y, axis=-1, act=None, name=None):
    """
    Element-wise power: computes ``x ** y`` with broadcasting of ``y`` onto
    ``x`` starting at dimension ``axis`` (same broadcast rules as the other
    ``elementwise_*`` layers in this module).

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            paddle.enable_static()
            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = fluid.layers.elementwise_pow(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])

            print(z_value) #[2, 243, 16]
    """
    # Eager mode: dispatch straight to the fused dygraph elementwise helper.
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_pow')
    # Static graph: the shared builder appends the op from this frame's locals.
    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))


@deprecated(since="2.0.0", update_to="paddle.remainder")
def elementwise_mod(x, y, axis=-1, act=None, name=None):
    """
    Examples:
        ..
code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([10, 15, 8]).astype('int32'), "y": np.array([3, 6, 5]).astype('int32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='int32') y = fluid.data(name="y", shape=[3], dtype='int32') z = fluid.layers.elementwise_mod(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) #[1, 3, 3] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_mod') return _elementwise_op(LayerHelper('elementwise_mod', **locals())) @deprecated(since="2.0.0", update_to="paddle.floor_divide") def elementwise_floordiv(x, y, axis=-1, act=None, name=None): """ Examples: .. code-block:: python import paddle.fluid as fluid import numpy as np import paddle def gen_data(): return { "x": np.array([10, 15, 8]).astype('int32'), "y": np.array([3, 7, 5]).astype('int32') } paddle.enable_static() x = fluid.data(name="x", shape=[3], dtype='int32') y = fluid.data(name="y", shape=[3], dtype='int32') z = fluid.layers.elementwise_floordiv(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) print(z_value) #[3, 2, 1] """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( x, y, axis=axis, act=act, op_name='elementwise_floordiv') return _elementwise_op(LayerHelper('elementwise_floordiv', **locals())) for func in [ elementwise_add, elementwise_div, elementwise_sub, elementwise_mul, elementwise_max, elementwise_pow, elementwise_min, elementwise_mod, elementwise_floordiv, ]: op_proto = OpProtoHolder.instance().get_op_proto(func.__name__) # insert the c++ doc string on top of python doc string func.__doc__ = _generate_doc_string_( op_proto, additional_args_lines=[ "axis (int32, optional): If X.dimension != Y.dimension, \ Y.dimension must be a subsequence of x.dimension. 
\ And axis is the start dimension index for broadcasting Y onto X. ", "act (string, optional): Activation applied to the output. \ Default is None. Details: :ref:`api_guide_activations_en` ", "name (string, optional): Name of the output. \ Default is None. It's used to print debug info for developers. Details: \ :ref:`api_guide_Name` " ], skip_attrs_set={ "x_data_format", "y_data_format", "axis", "use_quantizer", "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out" }) + """\n""" + str(func.__doc__) doc_list = func.__doc__.splitlines() for idx, val in enumerate(doc_list): if val.startswith("Warning: ") and val.endswith( " instead." ) and "and will be removed in future versions." in val: doc_list.insert(0, doc_list.pop(idx)) func.__doc__ = "\n" + "\n".join(i for i in doc_list) break for func in []: op_proto = OpProtoHolder.instance().get_op_proto(func.__name__) func.__doc__ = _generate_doc_string_( op_proto, additional_args_lines=[ "act (basestring|None): Activation applied to the output.", "name (basestring|None): Name of the output." ]) func.__doc__ = func.__doc__ + """ Examples: .. 
code-block:: python import paddle.fluid as fluid # example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5) x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32') y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32') z0 = fluid.layers.%s(x0, y0) # example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5) x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32') y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32') z1 = fluid.layers.%s(x1, y1) # example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32') y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32') z2 = fluid.layers.%s(x2, y2, axis=2) # example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32') y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32') z3 = fluid.layers.%s(x3, y3, axis=1) # example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32') y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32') z4 = fluid.layers.%s(x4, y4, axis=0) # example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32') y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32') z5 = fluid.layers.%s(x5, y5, axis=0) """ % (func.__name__, func.__name__, func.__name__, func.__name__, func.__name__, func.__name__) def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): if in_dygraph_mode(): op = getattr(_C_ops, op_name) if binary_op: return op(x, y) else: return op(x) check_variable_and_dtype(x, "x", [ "bool", "int8", "int16", "int32", "int64", "float32", "float64" ], op_name) if y is not None: check_variable_and_dtype(y, "y", [ "bool", "int8", "int16", "int32", "int64", "float32", "float64" ], op_name) if out is not 
def logical_and(x, y, out=None, name=None):
    r"""
    Compute element-wise logical AND of ``x`` and ``y``.

    .. math::

        out = x \&\& y

    .. note::
        Broadcasting between ``x`` and ``y`` is supported; see
        :ref:`user_guide_broadcasting`.

    Args:
        x (Tensor): input tensor of dtype bool, int8, int16, int32, int64,
            float32 or float64.
        y (Tensor): input tensor with the same supported dtypes as ``x``.
        out (Tensor, optional): pre-created tensor that receives the result.
            When None (the default) a new tensor is created.
        name (str, optional): operation name; see :ref:`api_guide_Name`.

    Returns:
        N-D boolean Tensor with the same dimension as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True])
            y = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_and(x, y)
            print(res)  # [True False True False]
    """
    return _logical_op(
        "logical_and", x, y, out=out, name=name, binary_op=True)
def logical_or(x, y, out=None, name=None):
    """
    Compute element-wise logical OR of ``x`` and ``y``.

    .. math::

        out = x || y

    .. note::
        Broadcasting between ``x`` and ``y`` is supported; see
        :ref:`user_guide_broadcasting`.

    Args:
        x (Tensor): input tensor of dtype bool, int8, int16, int32, int64,
            float32 or float64.
        y (Tensor): input tensor with the same supported dtypes as ``x``.
        out (Tensor, optional): pre-created tensor that receives the result.
            When None (the default) a new tensor is created.
        name (str, optional): operation name; see :ref:`api_guide_Name`.

    Returns:
        N-D boolean Tensor with the same dimension as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
            y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            res = paddle.logical_or(x, y)
            print(res)  # [[ True  True] [ True False]]
    """
    return _logical_op(
        "logical_or", x, y, out=out, name=name, binary_op=True)
def logical_xor(x, y, out=None, name=None):
    r"""
    Compute element-wise logical XOR of ``x`` and ``y``.

    .. math::

        out = (x || y) \&\& !(x \&\& y)

    .. note::
        Broadcasting between ``x`` and ``y`` is supported; see
        :ref:`user_guide_broadcasting`.

    Args:
        x (Tensor): input tensor of dtype bool, int8, int16, int32, int64,
            float32 or float64.
        y (Tensor): input tensor with the same supported dtypes as ``x``.
        out (Tensor, optional): pre-created tensor that receives the result.
            When None (the default) a new tensor is created.
        name (str, optional): operation name; see :ref:`api_guide_Name`.

    Returns:
        N-D boolean Tensor with the same dimension as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
            y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            res = paddle.logical_xor(x, y)
            print(res)  # [[False,  True], [ True, False]]
    """
    return _logical_op(
        "logical_xor", x, y, out=out, name=name, binary_op=True)
@templatedoc()
def clip(x, min, max, name=None):
    """
    :old_api: paddle.fluid.layers.clip

    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        min(float): ${min_comment}
        max(float): ${max_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        ${out_comment}

    Return Type:
        ${out_type}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(
                name='data', shape=[1], dtype='float32')
            reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
    """
    # LayerHelper must be created before any new local is bound: it
    # captures locals() as its kwargs.
    helper = LayerHelper("clip", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')

    if name is None:
        # Derive a unique, ignorable output name from the layer name.
        name = unique_name.generate_with_ignorable_key(
            ".".join([helper.name, 'tmp']))

    clipped = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="clip",
        inputs={"X": x},
        attrs={"min": min, "max": max},
        outputs={"Out": clipped})
    return clipped
@deprecated(since="2.0.0", update_to="paddle.mean")
@templatedoc()
def mean(x, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        name(basestring|None): Name of the output.

    Returns:
        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            input = fluid.layers.data(
                name='data', shape=[2, 3], dtype='float32')
            mean = fluid.layers.mean(input)
    """
    # Eager path: run the kernel directly.
    if in_dygraph_mode():
        return _C_ops.mean(x)

    helper = LayerHelper("mean", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="mean", inputs={"X": x}, attrs={}, outputs={"Out": result})
    return result
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
    """
    Mul Operator: matrix multiplication of ``x`` and ``y``.

    .. math::
        Out = x * y

    Inputs with more than two dimensions are first flattened into 2-D
    matrices: the leading ``x_num_col_dims`` (resp. ``y_num_col_dims``)
    dimensions form the matrix height, the remaining dimensions its width.
    Both inputs may carry LoD information; the output shares the LoD of
    ``x`` only.

    Args:
        x (Variable): first input Tensor/LoDTensor.
        y (Variable): second input Tensor/LoDTensor.
        x_num_col_dims (int, optional): number of leading dimensions of ``x``
            flattened into the matrix height. E.g. for shape [2, 3, 4, 5, 6]
            and ``x_num_col_dims=3`` the flattened matrix is
            [2*3*4, 5*6] = [24, 30]. Default is 1.
        y_num_col_dims (int, optional): same rule applied to ``y``.
            Default is 1.
        name (str, optional): output name; normally not set by users. See
            :ref:`api_guide_Name`. Default is None.

    Returns:
        Variable(Tensor/LoDTensor): the product of ``x`` and ``y``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32")
            dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32")
            output = fluid.layers.mul(dataX, dataY,
                                      x_num_col_dims = 1,
                                      y_num_col_dims = 1)
    """
    if in_dygraph_mode():
        return _C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
                          'y_num_col_dims', y_num_col_dims)

    # NOTE: `inputs` and `attrs` are bound before LayerHelper on purpose —
    # LayerHelper captures locals() as kwargs, so these names must exist
    # exactly as in the historical implementation.
    inputs = {"X": [x], "Y": [y]}
    attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
    helper = LayerHelper("mul", **locals())

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')

    product = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="mul",
        inputs={"X": x, "Y": y},
        attrs=attrs,
        outputs={"Out": product})
    return product
def space_to_depth(x, blocksize, name=None):
    r"""
    Rearranges blocks of spatial data into the depth (channel) dimension.

    Reorganizes an input of layout [batch, channel, height, width] into an
    output of shape [batch, channel * blocksize * blocksize,
    height/blocksize, width/blocksize]: each non-overlapping
    blocksize x blocksize spatial block becomes a group of channels, with
    the in-block (Y, X) coordinates forming the high-order part of the
    output channel index. Useful for resizing activations between
    convolutions while keeping all data.

    Constraints:
        - ``height`` and ``width`` must be divisible by ``blocksize``.
        - ``channel`` must be divisible by ``blocksize ** 2``.

    .. code-block:: text

        Given the input x with the shape [1, 1, 4, 4]:
        x.data = [[[[1,   2,  5,  6],
                    [3,   4,  7,  8],
                    [9,  10, 13, 14],
                    [11, 12, 15, 16]]]]
        blocksize = 2

        then get the output with the shape [1, 4, 2, 2]:
        out.data = [[[[1,   2],  [3,  4]],
                     [[5,   6],  [7,  8]],
                     [[9,  10],  [11, 12]],
                     [[13, 14],  [15, 16]]]]

    Args:
        x (Variable): 4-D Tensor or LoDTensor of shape
            [batch, channel, height, width].
        blocksize (int): spatial block edge length used for the
            rearrangement; must be at least 2.
        name (str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Variable: 4-D Tensor or LoDTensor of shape
        [batch, channel * blocksize * blocksize, height/blocksize,
        width/blocksize], same dtype as ``x``.

    Raises:
        ValueError: If ``blocksize`` is not a Python int.
            (Fixed doc: the previous docstring advertised a ``TypeError`` for
            an ``int64`` blocksize, which never matched the implementation.)

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            data = fluid.data(
                name='data', shape=[1, 4, 2, 2], dtype='float32')
            space_to_depthed = fluid.layers.space_to_depth(
                x=data, blocksize=2)

            exe = fluid.Executor(fluid.CPUPlace())
            data_np = np.arange(0, 16).reshape((1, 4, 2, 2)).astype('float32')
            out_main = exe.run(fluid.default_main_program(),
                               feed={'data': data_np},
                               fetch_list=[space_to_depthed])
    """
    helper = LayerHelper("space_to_depth", **locals())

    # The op only supports a plain Python int block size.
    if not isinstance(blocksize, int):
        raise ValueError("blocksize must be a python Int")

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'space_to_depth')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="space_to_depth",
        inputs={"X": x},
        attrs={"blocksize": blocksize},
        outputs={"Out": out})
    return out
def affine_channel(x,
                   scale=None,
                   bias=None,
                   data_layout='NCHW',
                   name=None,
                   act=None):
    """
    Apply a separate affine transformation (scale and bias) to each channel
    of the input. Useful for replacing spatial batch norm with its
    equivalent fixed transformation. A 2-D input is also accepted, in which
    case the transformation is applied along its second dimension.

    Args:
        x (Variable): feature map of order NCHW or NHWC (4-D), or a 2-D
            tensor. Data type float32 or float64.
        scale (Variable): 1-D tensor of shape (C); element c is the scale
            factor for channel c. Data type float32 or float64.
        bias (Variable): 1-D tensor of shape (C); element c is the bias for
            channel c. Data type float32 or float64.
        data_layout (str, optional): data format of the input, either
            "NCHW" (default) or "NHWC"; ignored for 2-D inputs. The output
            keeps the input's format.
        name (str, optional): layer name; see :ref:`api_guide_Name`.
        act (str, optional): activation applied to the output.

    Returns:
        Variable: tensor with the same shape, data layout and data type
        as ``x``.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
            input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
                    default_initializer=fluid.initializer.Constant(2.0))
            input_bias = fluid.layers.create_parameter(shape=[1], dtype="float32",
                    default_initializer=fluid.initializer.Constant(0.5))
            out = fluid.layers.affine_channel(data, scale=input_scale,
                    bias=input_bias)
    """
    helper = LayerHelper("affine_channel", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'affine_channel')
    check_type(scale, 'scale', (Variable, type(None)), 'affine_channel')
    check_type(bias, 'bias', (Variable, type(None)), 'affine_channel')

    transformed = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="affine_channel",
        inputs={"X": x, 'Scale': scale, 'Bias': bias},
        attrs={"data_layout": data_layout},
        outputs={"Out": transformed})
    # `act` (if any) is applied by the helper on the op output.
    return helper.append_activation(transformed)
def similarity_focus(input, axis, indexes, name=None):
    r"""
    SimilarityFocus Operator.

    Generates a similarity-focus mask with the same shape as ``input``:

    1. For ``axis`` and each index in ``indexes``, slice the 3-D tensor
       T = X[:, index, :, :] (illustrated for axis=1).
    2. Repeatedly pick the largest value of T whose row and column have not
       been used yet (so min(B, C) picks per slice) and mark the matching
       positions of the mask with 1, all others with 0. The masks for the
       different indexes are OR-ed together.
    3. Broadcast the 3-D mask to the full shape of ``input``.

    Refer to `Similarity Focus Layer
    <http://www.aclweb.org/anthology/N16-1108>`_.

    Args:
        input (Variable): 4-D tensor of shape [BatchSize, A, B, C], data
            type float32 or float64.
        axis (int): dimension to be selected; must be 1, 2 or 3.
        indexes (list): indexes of the selected dimension; must be non-empty.

    Returns:
        Variable: mask tensor with the same shape and type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            data = fluid.data(
                name='data', shape=[-1, 3, 2, 2], dtype='float32')
            fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
    """
    helper = LayerHelper('similarity_focus', **locals())

    # Validate operand dtype and attribute types.
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             "similarity_focus")
    check_type(axis, 'axis', int, "similarity_focus")
    check_type(indexes, 'indexes', list, "similarity_focus")

    if axis not in (1, 2, 3):
        raise ValueError("axis must be 1, 2 or 3.")
    if not indexes:
        raise ValueError("indexes can not be empty.")

    mask = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='similarity_focus',
        inputs={'X': input},
        outputs={'Out': mask},
        attrs={"axis": axis, "indexes": indexes})
    return mask
def hash(input, hash_size, num_hash=1, name=None):
    """
    Hash the input to an integer less than ``hash_size``, using the
    xxHash algorithm (https://github.com/Cyan4973/xxHash/tree/v0.6.5).

    Args:
        input (Variable): a **two-dimensional** LoDTensor of type int32 or
            int64. **Only LoDTensor is supported**.
        hash_size (int): upper bound of the hash values; every output value
            is taken modulo ``hash_size``. (Fixed doc: this parameter was
            previously undocumented.)
        num_hash (int, optional): how many times the hash is applied;
            each produces one output row per input row. Default is 1.
        name (str, optional): normally there is no need for user to set
            this property. For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Variable: a LoDTensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            place = fluid.core.CPUPlace()

            x = fluid.data(name="x", shape=[2, 2], dtype="int32", lod_level=1)
            res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            in1 = np.array([[1, 2], [3, 4]]).astype("int32")
            x_i = fluid.create_lod_tensor(in1, [[0, 2]], place)
            res = exe.run(fluid.default_main_program(),
                          feed={'x': x_i},
                          fetch_list=[res],
                          return_numpy=False)
    """
    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
    check_type(hash_size, 'hash_size', int, 'hash')
    check_type(num_hash, 'num_hash', int, 'hash')

    helper = LayerHelper('hash', **locals())
    # The result is not differentiable, so gradients are stopped.
    out = helper.create_variable_for_type_inference(
        helper.input_dtype(), stop_gradient=True)
    helper.append_op(
        type='hash',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'num_hash': num_hash, 'mod_by': hash_size})
    return out
@templatedoc()
def grid_sampler(x, grid, name=None):
    """
    Samples input ``x`` by bilinear interpolation at the flow-field
    locations given by ``grid`` (usually produced by :code:`affine_grid`).

    ``grid`` of shape [N, H, W, 2] holds (x, y) coordinates in [-1, 1];
    x indexes the width dimension and y the height dimension of the input.
    Each coordinate is scaled to [0, W-1] / [0, H-1] and the output value
    is the bilinear interpolation of the 4 nearest corner points, giving an
    output of shape [N, C, H, W].

    Args:
        x (Variable): 4-D input tensor of shape [N, C, H, W] (batch,
            channels, height, width), data type float32 or float64.
        grid (Variable): grid tensor of shape [N, H, W, 2], data type
            float32 or float64.
        name (str, optional): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and None
            by default.

    Returns:
        Variable: output of shape [N, C, H, W], same data type as ``x``.

    Raises:
        ValueError: If ``x`` or ``grid`` is not a Variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            # use with affine_grid
            x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
            theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
            grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
            out = fluid.layers.grid_sampler(x=x, grid=grid)
    """
    helper = LayerHelper("grid_sampler", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
    check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
                             'grid_sampler')
    # BUG FIX: these checks previously executed `return ValueError(...)`,
    # which handed the exception object back to the caller instead of
    # raising it. They now raise as the docstring promises.
    if not isinstance(x, Variable):
        raise ValueError("The x should be a Variable")
    if not isinstance(grid, Variable):
        raise ValueError("The grid should be a Variable")

    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x, 'Grid': grid}

    # cuDNN's grid sampler is unavailable on ROCm builds.
    attrs = {'use_cudnn': False} if core.is_compiled_with_rocm() else {}

    helper.append_op(
        type='grid_sampler', inputs=ipts, outputs={'Output': out},
        attrs=attrs)
    return out
def log_loss(input, label, epsilon=1e-4, name=None):
    r"""
    **Negative Log Loss Layer**

    Computes the negative log loss between predictions and targets:

    .. math::

        Out = -label * \log{(input + \epsilon)}
              - (1 - label) * \log{(1 - input + \epsilon)}

    Args:
        input (Tensor|list): 2-D tensor of shape [N x 1] (N = batch size)
            holding probabilities produced by a previous operator.
            Data type float32.
        label (Tensor|list): ground truth, 2-D tensor of shape [N x 1].
            Data type float32.
        epsilon (float, optional): small constant for numerical stability.
            Default 1e-4.
        name (str, optional): see :ref:`api_guide_Name`; usually left unset.

    Returns:
        Tensor of shape [N x 1], data type float32.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            label = paddle.randn((10, 1))
            prob = paddle.randn((10, 1))
            cost = F.log_loss(input=prob, label=label)
    """
    helper = LayerHelper('log_loss', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
    check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')

    cost = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='log_loss',
        inputs={'Predicted': [input], 'Labels': [label]},
        outputs={'Loss': [cost]},
        attrs={'epsilon': epsilon})
    return cost
def add_position_encoding(input, alpha, beta, name=None):
    r"""
    Weighted sum of the input feature at each sequence position and the
    corresponding sinusoidal position encoding, as described in
    `Attention Is All You Need <http://arxiv.org/pdf/1706.03762.pdf>`_:

    .. math::

        PE(pos, 2i) &= \sin{(pos / 10000^{2i / P})} \\
        PE(pos, 2i + 1) &= \cos{(pos / 10000^{2i / P})} \\
        Out(:, pos, i) &= \alpha * input(:, pos, i) + \beta * PE(pos, i)

    Args:
        input (Variable): a Tensor of shape [N, M, P] (batch, sequence
            length, feature size) or a lod-level-1 LoDTensor of shape
            [N, P] (total sequence length, feature size). Data type
            float32 or float64.
        alpha (float): weight of ``input`` in the sum.
        beta (float): weight of the position encoding in the sum.
        name (str, optional): see :ref:`api_guide_Name`; usually left unset.

    Returns:
        Variable: Tensor/LoDTensor with the same shape, data type and lod
        as ``input``.

    Examples:
        .. code-block:: python

            import paddle

            tensor = paddle.randn([16, 32, 64])
            position_tensor = paddle.fluid.layers.add_position_encoding(
                input=tensor, alpha=1.0, beta=1.0)
    """
    if in_dygraph_mode():
        return _C_ops.add_position_encoding(input, "alpha", alpha, "beta",
                                            beta)

    helper = LayerHelper('add_position_encoding', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             "add_position_encoding")

    encoded = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    helper.append_op(
        type="add_position_encoding",
        inputs={"X": input},
        outputs={"Out": encoded},
        attrs={"alpha": alpha, "beta": beta})
    return encoded
@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
    """
    Convert an input of SelectedRows type into a LoDTensor holding its
    value rows.

    .. code-block:: text

        input x is SelectedRows:
           x.rows = [0, 5, 5, 4, 19]
           x.height = 20
           x.value = [[1, 1] [2, 2] [2, 2] [3, 3] [6, 6]]

        Ouput is LoDTensor:
           out.shape = [5, 2]
           out.data = [[1, 1],
                       [2, 2],
                       [2, 2],
                       [3, 3],
                       [6, 6]]

    Args:
        x(SelectedRows): Input with SelectedRows type. The data type is
            float32, float64, int32 or int64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name` .

    Returns:
        Variable: LoDTensor transformed from SelectedRows. The data type is
        the same as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            input = b.create_var(name="X", dtype="float32", persistable=True,
                type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            out = fluid.layers.get_tensor_from_selected_rows(input)
    """
    check_type(x, 'x', Variable, 'get_tensor_from_selected_rows')
    # Reject plain tensors: this op is only defined for SelectedRows inputs.
    if x.type != core.VarDesc.VarType.SELECTED_ROWS:
        raise TypeError(
            "The type of 'x' in get_tensor_from_selected_rows must be SELECTED_ROWS."
        )
    helper = LayerHelper('get_tensor_from_selected_rows', **locals())
    result = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='get_tensor_from_selected_rows',
        inputs={'X': x},
        outputs={'Out': result},
        attrs={})
    return result
def shuffle_channel(x, group, name=None):
    """
    Shuffle the channels of input ``x``.

    Divides the input channels into :attr:`group` subgroups and obtains a new
    order by selecting one element from every subgroup in turn, as described
    in ShuffleNet: https://arxiv.org/pdf/1707.01083.pdf

    .. code-block:: text

        Given a 4-D tensor input with the shape (N, C, H, W):
            input.shape = (1, 4, 2, 2)
            input.data =[[[[0.1, 0.2],
                           [0.2, 0.3]],
                          [[0.3, 0.4],
                           [0.4, 0.5]],
                          [[0.5, 0.6],
                           [0.6, 0.7]],
                          [[0.7, 0.8],
                           [0.8, 0.9]]]]
            Given group: 2
            then we get a 4-D tensor out with the same shape of input:
            out.shape = (1, 4, 2, 2)
            out.data = [[[[0.1, 0.2],
                          [0.2, 0.3]],
                         [[0.5, 0.6],
                          [0.6, 0.7]],
                         [[0.3, 0.4],
                          [0.4, 0.5]],
                         [[0.7, 0.8],
                          [0.8, 0.9]]]]

    Args:
        x(Variable): The input tensor variable. It should be a 4-D tensor
            with shape [N, C, H, W].
        group(int): Indicating the counts of subgroups. It should divide the
            number of channels.
        name(str, optional): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and None
            by default.

    Returns:
        out(Variable): the channels shuffling result is a tensor variable
        with the same shape and same type as the input.

    Raises:
        TypeError: If ``group`` is not an int.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
            out = fluid.layers.shuffle_channel(x=input, group=2)
    """
    # Validate before touching the program: the old code created the output
    # variable first, leaking a dangling variable when `group` was invalid.
    # NOTE: the docstring previously advertised ValueError, but the code has
    # always raised TypeError here; the docs above now match the behavior.
    if not isinstance(group, int):
        raise TypeError("group must be int type")

    helper = LayerHelper("shuffle_channel", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="shuffle_channel",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"group": group})
    return out
@templatedoc()
def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
    """
    **Temporal Shift Operator**

    ${comment}

    Args:
        x(Tensor): ${x_comment}
        seg_num(int): ${seg_num_comment}
        shift_ratio(float): ${shift_ratio_comment}
        name(str, optional): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        data_format(str, optional): Data format that specifies the layout of
            input. It can be "NCHW" or "NHWC". Default: "NCHW".

    Returns:
        out(Tensor): The temporal shifting result is a tensor with the
        same shape and same data type as the input.

    Raises:
        TypeError: seg_num must be int type.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.randn([6, 4, 2, 2])
            out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
    """
    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. "
                         "Received Attr(data_format): {}.".format(data_format))
    if in_dygraph_mode():
        return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
                                     shift_ratio, 'data_format', data_format)

    helper = LayerHelper("temporal_shift", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
    # check_type already raises TypeError for a non-int seg_num, so the old
    # duplicate `isinstance(seg_num, int)` check after it was dead code.
    check_type(seg_num, 'seg_num', int, 'temporal_shift')
    check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="temporal_shift",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={
            "seg_num": seg_num,
            "shift_ratio": shift_ratio,
            "data_format": data_format
        })
    return out
" "Received Attr(data_format): {}.".format(data_format)) if in_dygraph_mode(): return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio', shift_ratio, 'data_format', data_format) helper = LayerHelper("temporal_shift", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift') check_type(seg_num, 'seg_num', int, 'temporal_shift') check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift') out = helper.create_variable_for_type_inference(dtype=x.dtype) if not isinstance(seg_num, int): raise TypeError("seg_num must be int type.") helper.append_op( type="temporal_shift", inputs={"X": x}, outputs={"Out": out}, attrs={ "seg_num": seg_num, "shift_ratio": shift_ratio, "data_format": data_format }) return out class PyFuncRegistry(object): _register_funcs = [] def __init__(self, func): if func is None or not callable(func): raise TypeError('func must be a Python function') self._func = func # find named args using reflection args = inspect.getargspec(self._func) if len(args[0]) == 0 and args[1] is None and args[2] is None: # Function with no inputs self._named_args = None else: self._named_args = args[0] self._id = core._append_python_callable_object_and_return_id(self) ''' Why record self here? 1. For debug usage. Users can call :code:`py_func.registered_func(idx)` method to find the registered function corresponding to :code:`idx`. 2. For increasing reference count of self. It seems that to release Python object whose reference count is 1 would cause segmentation fault error in C++ side. May be lack of Python GC in C++ side? 
@static_only
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
    """
    :api_attr: Static Graph

    Register a customized Python OP in a static-graph program.

    Paddle calls ``func`` at forward runtime with the Tensors in ``x``; its
    results must match ``out``. If ``backward_func`` is given, it is called
    at backward runtime with ``x``, ``out`` and the gradients of ``out``
    (``None`` for outputs without gradient; return ``None`` for inputs
    without gradient). Inside both callbacks it is suggested to convert
    Tensors to numpy arrays (``np.array(t)``) before applying numpy APIs.
    This API can also be used to debug a network by making ``func`` print
    its inputs.

    Args:
        func (callable): The forward function of the registered OP.
        x (Tensor|tuple(Tensor)|list[Tensor]): The input of ``func``.
            Multiple Tensors must be passed as a tuple or list.
        out (T|tuple(T)|list[T]): The output of ``func``; T can be Tensor or
            numpy array. Since shape and dtype cannot be inferred
            automatically, ``out`` must be created in advance with both set.
        backward_func (callable, optional): The backward function. Default
            None, meaning no gradient computation.
        skip_vars_in_backward_input (Tensor|tuple|list, optional): Tensors
            from ``x``/``out`` to exclude from ``backward_func``'s inputs.
            Only useful when ``backward_func`` is not None. Default None.

    Returns:
        Tensor|tuple(Tensor)|list[Tensor]: The output ``out`` of ``func``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.enable_static()

            def tanh(x):
                return np.tanh(x)

            def tanh_grad(y, dy):
                return np.array(dy) * (1 - np.square(np.array(y)))

            x = paddle.static.data(name='x', shape=[1, 4], dtype='float32')
            new_x = paddle.static.default_main_program().current_block(
                ).create_var(name='new_x', dtype='float32', shape=[1, 4])
            out = paddle.static.py_func(func=tanh, x=x, out=new_x,
                                        backward_func=tanh_grad)
    """
    helper = LayerHelper('py_func', **locals())
    check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
    # Normalize x to a list. check_type above guarantees that the only case
    # left unhandled by this chain is an actual list, so the old trailing
    # `elif not isinstance(...)` raise was unreachable and has been removed.
    if x is None:
        x = []
    elif isinstance(x, Variable):
        x = [x]
    elif isinstance(x, tuple):
        x = list(x)

    check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
    if out is None:
        out_list = []
    elif isinstance(out, Variable):
        out_list = [out]
    elif isinstance(out, tuple):
        out_list = list(out)
    elif isinstance(out, list):
        out_list = out
    else:
        raise TypeError(
            'Output must be Variable/list(Variable)/tuple(Variable)')

    fwd_func_id = PyFuncRegistry(func).id
    bwd_func_id = PyFuncRegistry(
        backward_func).id if backward_func is not None else -1

    for each_out in out_list:
        if len(each_out.shape) == 0:
            raise ValueError(
                'Output shapes of py_func op should be provided by users manually'
            )

    backward_skip_vars = set()
    if backward_func is not None and skip_vars_in_backward_input is not None:
        if isinstance(skip_vars_in_backward_input, Variable):
            skip_vars_in_backward_input = [skip_vars_in_backward_input]

        # Every skipped variable must come from the forward inputs/outputs.
        fwd_in_out = [v.name for v in x]
        fwd_in_out.extend([v.name for v in out_list])
        fwd_in_out = set(fwd_in_out)
        for v in skip_vars_in_backward_input:
            if not v.name in fwd_in_out:
                raise ValueError(
                    'Variable {} is not found in forward inputs and outputs'
                    .format(v.name))
            backward_skip_vars.add(v.name)

    helper.append_op(
        type='py_func',
        inputs={'X': x},
        outputs={'Out': out_list},
        attrs={
            'forward_callable_id': fwd_func_id,
            'backward_callable_id': bwd_func_id,
            'backward_skip_vars': list(backward_skip_vars)
        })
    return out


# For debug usage
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num
@templatedoc()
def psroi_pool(input,
               rois,
               output_channels,
               spatial_scale,
               pooled_height,
               pooled_width,
               name=None):
    """
    ${comment}

    Parameters:
        input (Variable): ${x_comment}
        rois (Variable): LoDTensor, ROIs (Regions of Interest) to pool over.
            It should be a 2-D LoDTensor of shape (num_rois, 4), lod level 1.
            Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left
            coordinates, and (x2, y2) is the bottom right coordinates.
            The data type is the same as `input`.
        output_channels (int): ${output_channels_comment}
        spatial_scale (float): ${spatial_scale_comment} Default: 1.0
        pooled_height (int): ${pooled_height_comment} Default: 1
        pooled_width (int): ${pooled_width_comment} Default: 1
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`

    Returns:
        ${out_comment}.

    Return Type:
        Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
            pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
    """
    helper = LayerHelper('psroi_pool', **locals())
    # check attrs
    if not isinstance(output_channels, int):
        raise TypeError("output_channels must be int type")
    if not isinstance(spatial_scale, float):
        raise TypeError("spatial_scale must be float type")
    if not isinstance(pooled_height, int):
        raise TypeError("pooled_height must be int type")
    if not isinstance(pooled_width, int):
        raise TypeError("pooled_width must be int type")

    pooled = helper.create_variable_for_type_inference(helper.input_dtype())
    pool_attrs = {
        'output_channels': output_channels,
        'spatial_scale': spatial_scale,
        'pooled_height': pooled_height,
        'pooled_width': pooled_width
    }
    helper.append_op(
        type='psroi_pool',
        inputs={'X': input,
                'ROIs': rois},
        outputs={'Out': pooled},
        attrs=pool_attrs)
    return pooled
@templatedoc()
def prroi_pool(input,
               rois,
               spatial_scale=1.0,
               pooled_height=1,
               pooled_width=1,
               batch_roi_nums=None,
               name=None):
    """
    The precise roi pooling implementation for paddle. Reference:
    https://arxiv.org/pdf/1807.11590.pdf

    Args:
        input (Variable): The input of precise roi pooling. The shape of
            input tensor is [N,C,H,W], where N is batch size, C is number of
            input channels, H is height of the feature, and W is the width
            of the feature.
        rois (Variable): ROIs (Regions of Interest) to pool over. A 2-D
            LoDTensor or Tensor of shape (num_rois, 4); lod level is 1 when
            it is a LoDTensor, carrying the rois' batch index information.
            If rois is a Tensor, its batch index information should be
            provided through ``batch_roi_nums``. Given as
            [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates and
            (x2, y2) is the bottom right coordinates.
        spatial_scale (float): Ratio of input feature map height (or width)
            to raw image height (or width), i.e. the reciprocal of the total
            stride in convolutional layers. Default: 1.0.
        pooled_height (integer): The pooled output height. Default: 1.
        pooled_width (integer): The pooled output width. Default: 1.
        batch_roi_nums (Variable): The number of rois for each image in the
            batch; a 1-D Tensor of shape [N] and dtype int64. Default: None.
            Note: the lod of ``input`` should be empty when
            ``batch_roi_nums`` has values.
        name (str, default None): The name of this operation.

    Returns:
        Variable(Tensor): Tensor of shape (N, C, pooled_height,
        pooled_width), with value type float32 or float16.

    Examples:
        .. code-block:: python

            ## prroi_pool without batch_roi_num
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
            pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)

            ## prroi_pool with batch_roi_num
            batchsize=4
            x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
            rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
            batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
            pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)
    """
    check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool')
    check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool')
    helper = LayerHelper('prroi_pool', **locals())
    # check attrs
    if not isinstance(spatial_scale, float):
        raise TypeError("spatial_scale must be float type")
    if not isinstance(pooled_height, int):
        raise TypeError("pooled_height must be int type")
    if not isinstance(pooled_width, int):
        raise TypeError("pooled_width must be int type")

    pooled = helper.create_variable_for_type_inference(helper.input_dtype())
    op_inputs = {'X': input, 'ROIs': rois}
    if batch_roi_nums is not None:
        # Tensor rois carry their batch index separately.
        op_inputs['BatchRoINums'] = batch_roi_nums
    helper.append_op(
        type='prroi_pool',
        inputs=op_inputs,
        outputs={'Out': pooled},
        attrs={
            'spatial_scale': spatial_scale,
            'pooled_height': pooled_height,
            'pooled_width': pooled_width
        })
    return pooled
def pixel_shuffle(x, upscale_factor):
    """
    This op rearranges elements in a tensor of shape [N, C, H, W]
    to a tensor of shape [N, C/r**2, H*r, W*r], which is useful for
    implementing efficient sub-pixel convolution with a stride of 1/r.
    Please refer to the paper: `Real-Time Single Image and Video
    Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural
    Network <https://arxiv.org/abs/1609.05158v2>`_ by Shi et. al (2016).

    Parameters:
        x(Variable): 4-D tensor, the data type should be float32 or float64.
        upscale_factor(int): factor to increase spatial resolution.

    Returns:
        Out(Variable): Reshaped tensor according to the new dimension.

    Raises:
        TypeError: If ``upscale_factor`` is not an int.
        ValueError: If the square of ``upscale_factor`` cannot divide the
            channels of the input (raised at runtime by the operator).

    Examples:
        .. code-block:: python

            # declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[2,9,4,4])
            output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,9,4,4).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            # print(output.shape)
            # (2L, 1L, 12L, 12L)
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
    # Validate before creating the output variable: the old code allocated
    # the output first, leaking a program variable on an invalid factor.
    if not isinstance(upscale_factor, int):
        raise TypeError("upscale factor must be int type")

    helper = LayerHelper("pixel_shuffle", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="pixel_shuffle",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"upscale_factor": upscale_factor})
    return out
def fsp_matrix(x, y):
    """
    **FSP matrix op**

    Calculates the flow of solution procedure (FSP) matrix of two 4-D Tensor
    feature maps. Given feature map x of shape [x_channel, h, w] and feature
    map y of shape [y_channel, h, w], the fsp matrix is obtained by:

    1. reshaping x into a matrix of shape [x_channel, h * w] and reshaping
       and transposing y into a matrix of shape [h * w, y_channel];
    2. multiplying x and y to get an fsp matrix of shape
       [x_channel, y_channel].

    The output is a batch of fsp matrices.

    Args:
        x (Variable): A 4-D Tensor feature map with shape
            [batch_size, x_channel, height, width]. A Tensor with type
            float32, float64.
        y (Variable): A 4-D Tensor feature map with shape
            [batch_size, y_channel, height, width]. The y_channel can differ
            from x's channel, while the other dimensions must match x's.
            A Tensor with type float32, float64.

    Returns:
        fsp matrix (Variable): The output of FSP op with shape
        [batch_size, x_channel, y_channel], same type as the inputs.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(name='data', shape=[None, 3, 32, 32])
            feature_map_0 = fluid.layers.conv2d(data, num_filters=2,
                                                filter_size=3)
            feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2,
                                                filter_size=1)
            loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
    helper = LayerHelper('fsp_matrix', **locals())
    out_dtype = helper.input_dtype(input_param_name='x')
    result = helper.create_variable_for_type_inference(dtype=out_dtype)
    helper.append_op(
        type='fsp', inputs={'X': x,
                            'Y': y}, outputs={'Out': result})
    return result
def continuous_value_model(input, cvm, use_cvm=True):
    r"""
    **continuous_value_model layers**

    Used in CTR projects to keep or remove the show/click values carried in
    :attr:`input`. :attr:`input` is an embedding vector whose first two
    entries along dim D are show and click; its shape is :math:`[N, D]`
    (N is batch size, D is `2 + embedding dim`).

    If :attr:`use_cvm` is True, :math:`log(show)` and :math:`log(click)` are
    computed and the output keeps shape :math:`[N, D]`. If False, show and
    click are stripped and the output has shape :math:`[N, D - 2]`.
    :attr:`cvm` is the show_click info of shape :math:`[N, 2]`.

    Args:
        input (Variable): A 2-D LoDTensor with shape :math:`[N, D]`,
            `lod level = 1`, with type float32 or float64.
        cvm (Variable): Show and click variable, a 2-D Tensor with shape
            :math:`[N, 2]`, with type float32 or float64.
        use_cvm (bool): Whether to keep show/click in the output. If True the
            output dim equals the input dim; if False it is `input dim - 2`.

    Returns:
        Variable: A 2-D LodTensor with shape :math:`[N, M]`, where M equals
        D when :attr:`use_cvm` is True and `D - 2` otherwise. Same type as
        the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(name="input", shape=[64, 1], dtype="int64")
            label = fluid.data(name="label", shape=[64, 1], dtype="int64")
            embed = fluid.layers.embedding(
                            input=input,
                            size=[100, 11],
                            dtype='float32')
            ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
            show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
            show_clk.stop_gradient = True
            input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)
    """
    helper = LayerHelper('cvm', **locals())
    cvm_out = helper.create_variable(dtype=input.dtype)
    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'cvm')
    helper.append_op(
        type='cvm',
        inputs={'X': [input],
                'CVM': [cvm]},
        outputs={'Y': [cvm_out]},
        attrs={"use_cvm": use_cvm})
    return cvm_out
def where(condition):
    """
    Return an int64 tensor of rank 2 that lists the coordinate of every true
    element in `condition`.

    Args:
        condition(Variable): A bool tensor with rank at least 1, the data
            type is bool.

    Returns:
        Variable, the output data type is int64. : A 2-D tensor holding all
        coordinates of true elements.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             import paddle.fluid.layers as layers
             import numpy as np

             # condition is a tensor [True, False, True]
             condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[0], [2]]

             # condition is a tensor [[True, False], [False, True]]
             condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[0, 0], [1, 1]]

             # condition is a tensor [False, False, False]
             condition = layers.assign(np.array([0, 0, 0], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[]]
    """
    # Dynamic graph: call the C++ op directly.
    if in_dygraph_mode():
        return _C_ops.where_index(condition)

    helper = LayerHelper("where_index", **locals())
    coords = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
    helper.append_op(
        type='where_index',
        inputs={'Condition': condition},
        outputs={'Out': [coords]})
    return coords
@deprecated(since="2.0.0", update_to="paddle.sign")
def sign(x):
    r"""
    Element-wise sign of every entry in `x`: 1 for positive, -1 for negative
    and 0 for zero.

    Args:
        x(Variable|numpy.ndarray): N-D tensor or N-D numpy array; the input
            data type is float16, float32 or float64.

    Returns:
        Variable: sign tensor with identical shape and dtype as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np

          # [1.0, 0.0, -1.0]
          data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
    """
    helper = LayerHelper("sign", **locals())
    check_type(x, 'x', (Variable, np.ndarray), 'sign')
    # numpy inputs are first materialized as a framework variable.
    if isinstance(x, np.ndarray):
        x = assign(x)
    check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign')
    result = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='sign', inputs={'X': [x]}, outputs={'Out': [result]})

    return result
def unique(x, dtype='int32'):
    r"""
    Return the unique elements of `x` together with an index tensor mapping
    every element of `x` back into the unique tensor.

    Args:
        x(Tensor): A 1-D input tensor; its data type should be float32,
            float64, int32 or int64.
        dtype(np.dtype|str, optional): The type of the index tensor: int32 or
            int64. Default: int32.

    Returns:
        tuple: (out, index). `out` is the unique tensor for `x`, with
        identical dtype to `x`, and `index` is an index tensor pointing to
        `out`, by which user can recover the original `x` tensor.

    Examples:
        .. code-block:: python

             import numpy as np
             import paddle.fluid as fluid
             x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
             out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
    """
    check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
                             "unique")
    helper = LayerHelper("unique", **locals())
    uniq = helper.create_variable_for_type_inference(dtype=x.dtype)
    index = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type='unique',
        inputs={'X': x},
        attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
        outputs={'Out': [uniq],
                 'Index': [index]})

    return uniq, index
def unique_with_counts(x, dtype='int32'):
    r"""
    Return a unique tensor for `x`, a count tensor holding how often each
    unique value occurs in the raw input, and an index tensor pointing into
    the unique tensor.

    **NOTICE**: This op support the variable type of Tensor only.

    Args:
        x(Variable): A 1-D input tensor with input shape of :math:`[N]`, the
            input data type is float32, float64, int32, int64.
        dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index
            tensor; it could be int32 or int64. Default value is int32.

    Returns:
        tuple: (out, index, count). `out` is the unique tensor of shape
        :math:`[K]` with the same dtype as `x` (K may differ from N);
        `index` has shape :math:`[N]` and maps each element of `x` into
        `out`; `count` has shape :math:`[K]` and holds each unique value's
        number of occurrences. `index` and `count` have dtype `dtype`.

    Examples:
        .. code-block:: python

             import numpy as np
             import paddle.fluid as fluid
             x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
             out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
                                                        # count is [1, 3, 1, 1]
            # x.shape=(6,) out.shape=(4,), index.shape=(6,), count.shape=(4,)
    """
    check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
                             "unique_with_counts")
    # Membership test via `in` uses ==, so this also accepts np.dtype values.
    if dtype not in ('int32', 'int64'):
        raise TypeError(
            "Op unique_with_counts, index dtype must be int32 or int64")

    if x is None or len(x.shape) != 1:
        raise ValueError(
            "Op unique_with_counts, x must not be null and size of dim must be 1"
        )

    helper = LayerHelper("unique_with_counts", **locals())
    uniq = helper.create_variable_for_type_inference(dtype=x.dtype)
    index = helper.create_variable_for_type_inference(dtype)
    count = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type='unique_with_counts',
        inputs={'X': x},
        attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
        outputs={'Out': [uniq],
                 'Index': [index],
                 'Count': [count]})

    return uniq, index, count
def deformable_conv(input,
                    offset,
                    mask,
                    num_filters,
                    filter_size,
                    stride=1,
                    padding=0,
                    dilation=1,
                    groups=None,
                    deformable_groups=None,
                    im2col_step=None,
                    param_attr=None,
                    bias_attr=None,
                    modulated=True,
                    name=None):
    r"""
    :api_attr: Static Graph

    Compute 2-D deformable convolution on a 4-D input.

    With ``modulated=True`` this is Deformable Convolution v2
    (`Deformable ConvNets v2 <https://arxiv.org/abs/1811.11168v2>`_), which
    scales each sampled value by a learned modulation mask; with
    ``modulated=False`` it is v1
    (`Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_)
    and ``mask`` must be None.

    Args:
        input (Variable): Input feature map of shape [N, C, H, W], float32
            or float64.
        offset (Variable): Per-position sampling offsets of shape
            [N, 2 * deformable_groups * H_f * W_f, H, W], same dtype as
            ``input``.
        mask (Variable, Optional): Modulation mask of shape
            [N, deformable_groups * H_f * W_f, H, W]; None for v1.
        num_filters(int): Number of output channels.
        filter_size (int|tuple): Filter size, an int or (H_f, W_f).
        stride (int|tuple): Stride, an int or (stride_H, stride_W).
            Default: 1.
        padding (int|tuple): Padding, an int or (padding_H, padding_W).
            Default: 0.
        dilation (int|tuple): Dilation, an int or (dilation_H, dilation_W).
            Default: 1.
        groups (int): Number of convolution groups; input channels must be
            divisible by it. Default: None (treated as a single group).
        deformable_groups (int): Number of deformable group partitions.
        im2col_step (int): Maximum images per im2col computation; the batch
            size should be divisible by (or smaller than) this value.
        param_attr (ParamAttr, Optional): Attribute for the filter weights;
            when its initializer is unset, weights are drawn from
            :math:`Normal(0.0, (2 / fan\_in)^{0.5})`. Default: None.
        bias_attr (ParamAttr|bool, Optional): Attribute for the bias;
            False disables the bias. Default: None.
        modulated (bool): Select v2 (True) or v1 (False). Default: True.
        name(str, Optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Variable: The convolution result, same dtype as ``input``.

    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding
            and groups mismatch.
    """
    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'deformable_conv')
    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
                             'deformable_conv')
    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')

    num_channels = input.shape[1]
    assert param_attr is not False, "param_attr should not be False here."

    helper = LayerHelper('deformable_conv', **locals())
    dtype = helper.input_dtype()

    if not isinstance(input, Variable):
        raise TypeError("Input of deformable_conv must be Variable")
    if not isinstance(offset, Variable):
        raise TypeError("Input Offset of deformable_conv must be Variable")

    # Grouped convolution: each filter only sees its group's channel slice.
    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    padding = utils.convert_to_list(padding, 2, 'padding')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    filter_shape = [num_filters, int(num_filter_channels)] + filter_size

    def _get_default_param_initializer():
        # MSRA-style init: std derived from the fan-in of a single filter.
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        if filter_elem_num <= 0:
            raise ValueError(
                "Invalid filter number, excepted number is larger than 0, but"
                " received {}, please check the input shape and "
                "filter size.".format(filter_elem_num))
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    # v2 ("modulated") feeds an extra Mask input; both versions share the
    # same attribute set.
    conv_inputs = {
        'Input': input,
        'Filter': filter_param,
        'Offset': offset,
    }
    if modulated:
        conv_inputs['Mask'] = mask
    helper.append_op(
        type='deformable_conv' if modulated else 'deformable_conv_v1',
        inputs=conv_inputs,
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'deformable_groups': deformable_groups,
            'im2col_step': im2col_step,
        })

    return helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
    r"""
    Extract sliding local blocks from a batched 4-D input (im2col).

    Every block covered by the convolution window is flattened into one
    column of the output; for input [N, C, H, W] the result has shape
    [N, Cout, Lout] with

    .. math::

        Cout &= C \times kernel\_sizes[0] \times kernel\_sizes[1]

        Lout &= hout \times wout

    where ``hout``/``wout`` are the number of window positions along each
    spatial dimension given the strides, paddings and dilations.

    Parameters:
        x(Tensor): 4-D input of format [N, C, H, W], float32 or float64.
        kernel_sizes(int|list): Kernel size, [k_h, k_w] or an int k
            meaning [k, k].
        strides(int|list): Strides, [stride_h, stride_w] or an int.
            Default: 1.
        paddings(int|list): Paddings as [top, left, bottom, right],
            [pad_h, pad_w] (expanded symmetrically) or a single int
            applied to all four sides. Default: 0.
        dilations(int|list): Dilations, [dilation_h, dilation_w] or an
            int. Default: 1.
        name(str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor of shape [N, Cout, Lout] with the same dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.randn((100,3,224,224))
            y = F.unfold(x, [3, 3], 1, 1, 1)
    """
    helper = LayerHelper("unfold", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')

    assert len(x.shape) == 4, \
        "input should be the format of [N, C, H, W]"

    def _as_pair(value, arg_name):
        # Accept either a single int or an explicit [h, w] pair.
        if isinstance(value, int):
            return [value, value]
        assert isinstance(value, list) and (len(value) == 2), \
            "%s should either be an integer or a list of two integers" % arg_name
        return value

    kernel_sizes = _as_pair(kernel_sizes, 'kernel_sizes')
    strides = _as_pair(strides, 'strides')
    dilations = _as_pair(dilations, 'dilations')

    if isinstance(paddings, int):
        paddings = [paddings] * 4
    elif isinstance(paddings, list):
        if len(paddings) == 2:
            # [pad_h, pad_w] -> [pad_h, pad_w, pad_h, pad_w]
            paddings = paddings * 2
        elif len(paddings) == 4:
            pass
        else:
            raise ValueError(
                "paddings should either be an integer or a list of 2 or 4 integers"
            )
    else:
        raise ValueError(
            "Unexpected type of paddings, it should be either an integer or a list"
            "of 2 or 4 integers")

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="unfold",
        inputs={"X": x},
        outputs={"Y": out},
        attrs={
            "kernel_sizes": kernel_sizes,
            "strides": strides,
            "paddings": paddings,
            "dilations": dilations
        })
    return out
def deformable_roi_pooling(input,
                           rois,
                           trans,
                           no_trans=False,
                           spatial_scale=1.0,
                           group_size=[1, 1],
                           pooled_height=1,
                           pooled_width=1,
                           part_size=None,
                           sample_per_part=1,
                           trans_std=0.1,
                           position_sensitive=False,
                           name=None):
    r"""
    Deformable ROI pooling, as described in
    `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    Each region proposal is divided into pooled_height x pooled_width bins;
    a learned offset (``trans``) shifts the sampling locations inside each
    bin, values are read via bilinear interpolation, and several samples per
    bin are averaged into the output.

    Args:
        input (Variable): Input feature map of shape [N, C, H, W], float32.
        rois (Variable): Regions of interest, a 2-D LoDTensor of shape
            (num_rois, 4) with lod level 1, each row [x1, y1, x2, y2]
            giving top-left and bottom-right corners; float32.
        trans (Variable): Sampling offsets of shape [N, C, H, W] where N is
            the number of ROIs and C holds x/y offset channels; float32.
        no_trans (bool): When True, no offset is applied during pooling.
            Default: False.
        spatial_scale (float): Ratio of feature-map size to raw image size
            (reciprocal of the total convolutional stride). Default: 1.0.
        group_size (list|tuple): [group_height, group_width] into which
            input channels are divided. Default: [1, 1].
        pooled_height (int): Output height. Default: 1.
        pooled_width (int): Output width. Default: 1.
        part_size (list|tuple): [height, width] of the offset grid; when
            None it defaults to [pooled_height, pooled_width].
        sample_per_part (int): Number of samples per bin. Default: 1.
        trans_std (float): Coefficient scaling the offsets. Default: 0.1.
        position_sensitive (bool): When True, use the position-sensitive
            (PS-ROI) variant: the input channel count should equal
            output_channels * pooled_height * pooled_width. Default: False.
        name (str|None): Layer name. Default: None.

    Returns:
        Variable: The pooled output; with ``position_sensitive=False`` the
        channel count matches the input, otherwise it is the input channel
        count divided by pooled_height * pooled_width.
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_type(group_size, 'group_size', (list, tuple),
               'deformable_roi_pooling')
    if part_size is not None:
        check_type(part_size, 'part_size', (list, tuple),
                   'deformable_roi_pooling')

    input_channels = input.shape[1]
    if position_sensitive == False:
        output_channels = input_channels
    else:
        # NOTE(review): true division produces a float for 'output_dim';
        # presumably the C++ op coerces it -- confirm before changing.
        output_channels = input_channels / pooled_height / pooled_width

    if part_size is None:
        # Offset grid defaults to the pooled output resolution.
        part_size = [pooled_height, pooled_width]
    part_size = utils.convert_to_list(part_size, 2, 'part_size')
    group_size = utils.convert_to_list(group_size, 2, 'group_size')

    helper = LayerHelper('deformable_psroi_pooling', **locals())
    dtype = helper.input_dtype()
    output = helper.create_variable_for_type_inference(dtype)
    top_count = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="deformable_psroi_pooling",
        inputs={"Input": input,
                "ROIs": rois,
                "Trans": trans},
        outputs={"Output": output,
                 "TopCount": top_count},
        attrs={
            "no_trans": no_trans,
            "spatial_scale": spatial_scale,
            "output_dim": output_channels,
            "group_size": group_size,
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "part_size": part_size,
            "sample_per_part": sample_per_part,
            "trans_std": trans_std
        })
    return output
@deprecated(since="2.0.0", update_to="paddle.shard_index")
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
    """
    Recompute every value in ``input`` as an offset local to one shard.

    The index range ``[0, index_num)`` is split into ``nshards`` shards of

    ::

        shard_size = (index_num + nshards - 1) // nshards

    values each.  A value that falls inside shard ``shard_id`` is rebased
    to its offset within that shard; any other value becomes
    ``ignore_value``:

    ::

        v = v - shard_id * shard_size if shard_id * shard_size <= v < (shard_id+1) * shard_size else ignore_value

    Args:
        input (Tensor): Tensor of dtype int64 or int32 whose last dimension
            is 1; all values must lie in ``[0, index_num)``.
        index_num (int): Exclusive upper bound of the values in ``input``.
        nshards (int): Total number of shards.
        shard_id (int): Index of the current shard, in ``[0, nshards)``.
        ignore_value (int): Replacement for values outside this shard.

    Returns:
        Tensor with the same shape and dtype as ``input``.

    Examples:
        .. code-block:: python

            import paddle
            label = paddle.to_tensor([[16], [1]], "int64")
            shard_label = paddle.shard_index(input=label,
                                             index_num=20,
                                             nshards=2,
                                             shard_id=0)
            print(shard_label)
            # [[-1], [1]]
    """
    check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index')
    op_type = 'shard_index'
    helper = LayerHelper(op_type, **locals())
    if not 0 <= shard_id < nshards:
        raise ValueError('The shard_id(%d) should be in [0, %d)' %
                         (shard_id, nshards))

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=op_type,
        inputs={'X': [input]},
        outputs={'Out': out},
        attrs={
            'index_num': index_num,
            'nshards': nshards,
            'shard_id': shard_id,
            'ignore_value': ignore_value
        },
        stop_gradient=True)
    return out
@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
    r"""
    Hard-swish activation, proposed in MobileNetV3
    (https://arxiv.org/pdf/1905.02244.pdf) as a cheaper, numerically stable
    alternative to swish:

    .. math:: out = \\frac{x * (min(max(0, x+offset), threshold))}{scale}

    ``threshold`` and ``scale`` should be positive; ``offset`` may have
    either sign.  The defaults are the recommended values.

    Args:
        x (Variable): Input tensor, float32 or float64 (float16 accepted).
        threshold (float, optional): Clip threshold. Default: 6.0.
        scale (float, optional): Scale factor. Default: 6.0.
        offset (float, optional): Offset term. Default: 3.0.
        name (str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Variable: Output with the same shape and dtype as ``x``.
    """
    if in_dygraph_mode():
        # Dygraph fast path: invoke the C++ op directly.
        return _C_ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
                                 'offset', offset)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_swish')

    helper = LayerHelper('hard_swish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='hard_swish',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold,
               'scale': scale,
               'offset': offset})
    return out


@templatedoc()
def mish(x, threshold=20, name=None):
    r"""
    Mish activation
    (`Mish: A Self Regularized Non-Monotonic Neural Activation Function
    <https://arxiv.org/abs/1908.08681>`_):

    .. math:: out = x * \\tanh(\\ln(1 + e^{x}))

    When ``threshold`` is positive, the softplus term is approximated for
    inputs whose magnitude exceeds it (``x`` for large positive inputs,
    ``e^x`` for large negative ones) to avoid overflow.

    Args:
        x (Variable): Input tensor, float32 or float64.
        threshold (float|None): Softplus approximation threshold; must be
            positive. Default: 20.
        name (str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Variable: Output with the same shape and dtype as ``x``.
    """
    if in_dygraph_mode():
        return _C_ops.mish(x, 'threshold', threshold)

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish')
    check_type(threshold, 'threshold', (float, int), 'mish')
    assert threshold > 0, "threshold of mish should be greater than 0, " \
                          "but got {}".format(threshold)

    helper = LayerHelper('mish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='mish',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold})
    return out


def gather_tree(ids, parents):
    r"""
    Backtrace beam-search results into full sequences.

    ``ids`` holds the selected token ids of every time step and ``parents``
    the beam indices they were expanded from; both have layout
    [max_time, batch_size, beam_size].  Starting from the last step, this op
    follows the parent pointers backwards and gathers the ids along each
    path, producing the complete sequences.

    Args:
        ids(Tensor): int32 or int64 tensor of shape
            [length, batch_size, beam_size] with the per-step selections.
        parents(Tensor): Tensor of the same shape and dtype as ``ids``
            holding the parent beam of each selection.

    Returns:
        A Tensor with the same shape and dtype as ``ids`` containing the
        backtraced sequences.

    Examples:
        .. code-block:: python

            import paddle

            ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
            parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])
            final_sequences = paddle.nn.functional.gather_tree(ids, parents)
            # [[[2, 2], [1, 6]], [[3, 3], [6, 1]], [[0, 1], [9, 0]]]
    """
    if in_dygraph_mode():
        return _C_ops.gather_tree(ids, parents)

    helper = LayerHelper('gather_tree', **locals())
    check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
    check_variable_and_dtype(parents, 'parents', ['int32', 'int64'],
                             'gather_tree')
    out = helper.create_variable_for_type_inference(dtype=ids.dtype)
    helper.append_op(
        type="gather_tree",
        inputs={"Ids": ids,
                "Parents": parents},
        outputs={"Out": out})
    return out
@deprecated(since="2.0.0", update_to="paddle.uniform")
@templatedoc()
def uniform_random(shape,
                   dtype='float32',
                   min=-1.0,
                   max=1.0,
                   seed=0,
                   name=None):
    """
    Return a Tensor of the given ``shape`` and ``dtype`` filled with values
    drawn uniformly at random from ``[min, max)``.

    Args:
        shape(list|tuple|Tensor): Shape of the output. A list/tuple may mix
            ints and 1-D int32/int64 Tensors; alternatively a single 1-D
            int32/int64 Tensor may be passed.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): Output data
            type: float32 or float64. Default: float32.
        min(float|int, optional): Inclusive lower bound. Default: -1.0.
        max(float|int, optional): Exclusive upper bound. Default: 1.0.
        seed(int, optional): Random seed; 0 lets the system pick one. A
            non-zero seed makes the op return the same numbers on every
            call. Default: 0.
        name(str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: Uniformly distributed random values of ``shape``/``dtype``.

    Raises:
        TypeError: If ``shape`` is not list, tuple, Tensor.
        TypeError: If ``dtype`` is not float32, float64.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            # example 1: shape as a plain list of ints
            result_1 = fluid.layers.uniform_random(shape=[3, 4])

            # example 2: shape list containing Tensors
            dim_1 = fluid.layers.fill_constant([1], "int64", 2)
            dim_2 = fluid.layers.fill_constant([1], "int32", 3)
            result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])

            # example 3: shape as a 1-D int Tensor
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = fluid.layers.uniform_random(var_shape)
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.uniform_random('shape', shape, 'min',
                                     float(min), 'max',
                                     float(max), 'seed', seed, 'dtype', dtype)

    check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
    check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'),
                'uniform_random/rand')

    # Shape entries that are Tensors become op inputs; the rest go to attrs.
    op_inputs = dict()
    op_attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
    utils.get_shape_tensor_inputs(
        inputs=op_inputs,
        attrs=op_attrs,
        shape=shape,
        op_type='uniform_random/rand')

    helper = LayerHelper("uniform_random", **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="uniform_random",
        inputs=op_inputs,
        attrs=op_attrs,
        outputs={"Out": out})
    utils.try_set_static_shape_tensor(out, shape)
    return out
def unbind(input, axis=0):
    """
    Remove dimension ``axis`` from ``input`` and return the resulting
    sub-tensors as a list.

    Args:
        input (Variable): N-D Tensor of dtype float32, float64, int32 or
            int64.
        axis (int32|int64, optional): Dimension to unbind along; may be
            negative, in which case it counts from the last dimension
            (``rank(input) + axis``). Default: 0.

    Returns:
        list(Variable): ``input.shape[axis]`` tensors, each with the shape
        of ``input`` minus dimension ``axis``.

    Example:
        .. code-block:: python

            import paddle
            # input is a variable which shape is [3, 4, 5]
            input = paddle.fluid.data(
                name="input", shape=[3, 4, 5], dtype="float32")
            [x0, x1, x2] = paddle.tensor.unbind(input, axis=0)
            # x0.shape [4, 5]
            [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1)
            # x0.shape [3, 5]
    """
    helper = LayerHelper("unbind", **locals())
    check_type(input, 'input', (Variable), 'unbind')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
                'unbind')
    # BUGFIX: convert a NumPy integer scalar to a Python int *before* the
    # type check.  The original checked `isinstance(axis, int)` first, so a
    # np.int64 axis raised TypeError and the conversion branch was dead
    # code; it also used np.asscalar, which was removed in NumPy >= 1.23 --
    # ndarray/scalar .item() is the supported replacement.
    if isinstance(axis, np.generic):
        axis = axis.item()
    if not isinstance(axis, (int)):
        raise TypeError("The type of 'axis' must be int, but received %s." %
                        (type(axis)))

    input_shape = input.shape
    # Normalize a negative axis to its non-negative equivalent.
    axis_ = axis if axis >= 0 else len(input_shape) + axis
    num = input_shape[axis_]
    outs = [
        helper.create_variable_for_type_inference(dtype=dtype)
        for _ in range(num)
    ]

    helper.append_op(
        type="unbind",
        inputs={"X": input},
        outputs={"Out": outs},
        attrs={"axis": axis})
    return outs
39.662869
946
0.577482
from __future__ import print_function import os import inspect import warnings import numpy as np import six import paddle from ..layer_helper import LayerHelper from ..initializer import Normal, Constant, NumpyArrayInitializer from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags from .. import dygraph_utils from ..param_attr import ParamAttr from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_ from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor from . import utils from .. import unique_name from functools import reduce from .. import core from ...utils import deprecated from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype import paddle from paddle.utils import deprecated from paddle import _C_ops __all__ = [ 'fc', 'embedding', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'chunk_eval', 'conv2d', 'conv3d', 'softmax', 'pool2d', 'pool3d', 'adaptive_pool2d', 'adaptive_pool3d', 'batch_norm', 'inplace_abn', 'instance_norm', 'data_norm', 'conv2d_transpose', 'conv3d_transpose', 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod', 'reduce_all', 'reduce_any', 'dropout', 'split', 'ctc_greedy_decoder', 'l2_normalize', 'matmul', 'topk', 'transpose', 'im2sequence', 'row_conv', 'multiplex', 'layer_norm', 'group_norm', 'spectral_norm', 'smooth_l1', 'one_hot', 'autoincreased_step_counter', 'reshape', 'squeeze', 'unsqueeze', 'lod_reset', 'lod_append', 'lrn', 'pad', 'pad_constant_like', 'label_smooth', 'roi_pool', 'roi_align', 'dice_loss', 'image_resize', 'image_resize_short', 'resize_linear', 'resize_bilinear', 'resize_trilinear', 'resize_nearest', 'gather', 'gather_nd', 'scatter', 'scatter_nd_add', 'scatter_nd', 'random_crop', 'mean_iou', 'relu', 'selu', 'log', 'crop', 'crop_tensor', 'elu', 'relu6', 'pow', 'stanh', 'hard_sigmoid', 'swish', 'prelu', 'brelu', 
'leaky_relu', 'soft_relu', 'flatten', 'stack', 'pad2d', 'unstack', 'unique', 'unique_with_counts', 'expand', 'expand_as', 'scale', 'elementwise_add', 'elementwise_div', 'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min', 'elementwise_pow', 'elementwise_mod', 'elementwise_floordiv', 'uniform_random_batch_size_like', 'gaussian_random', 'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'strided_slice', 'shape', 'rank', 'size', 'logical_and', 'logical_or', 'logical_xor', 'logical_not', 'clip', 'clip_by_norm', 'mean', 'mul', 'maxout', 'space_to_depth', 'affine_grid', 'affine_channel', 'similarity_focus', 'hash', 'grid_sampler', 'log_loss', 'add_position_encoding', 'bilinear_tensor_product', 'merge_selected_rows', 'get_tensor_from_selected_rows', 'shuffle_channel', 'temporal_shift', 'py_func', 'psroi_pool', 'prroi_pool', 'pixel_shuffle', 'fsp_matrix', 'continuous_value_model', 'where', 'sign', 'deformable_conv', 'unfold', 'deformable_roi_pooling', 'filter_by_instag', 'shard_index', 'hard_swish', 'mish', 'gather_tree', 'uniform_random', 'unbind', ] @dygraph_only def _elementwise_op_in_dygraph(x, y, axis=-1, act=None, use_mkldnn=False, op_name=None): op = getattr(_C_ops, op_name) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) return dygraph_utils._append_activation_in_dygraph( out, act, use_mkldnn=use_mkldnn) def fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None): helper = LayerHelper("fc", **locals()) check_type(input, 'input', (list, tuple, Variable), 'fc') if isinstance(input, (list, tuple)): for i, input_x in enumerate(input): check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc') dtype = helper.input_dtype() check_dtype(dtype, 'input', ['float16', 'uint16', 'float32', 'float64'], 'fc') mul_results = [] for input_var, param_attr in helper.iter_inputs_and_params(): input_shape = input_var.shape if num_flatten_dims == -1: num_flatten_dims = len(input_shape) - 1 param_shape = [ 
reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) ] + [size] w = helper.create_parameter( attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( type="mul", inputs={"X": input_var, "Y": w}, outputs={"Out": tmp}, attrs={"x_num_col_dims": num_flatten_dims, "y_num_col_dims": 1}) mul_results.append(tmp) if len(mul_results) == 1: pre_bias = mul_results[0] else: pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}, attrs={"use_mkldnn": False}) pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) return helper.append_activation(pre_activation) @deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding") def embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32'): helper = LayerHelper('embedding', **locals()) check_variable_and_dtype(input, 'input', ['int64'], 'fluid.layers.embedding') check_dtype(dtype, 'dtype', ['uint16', 'float16', 'float32', 'float64'], 'fluid.layers.embedding') if is_distributed: is_distributed = False warnings.warn( "is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed" ) remote_prefetch = True if is_sparse else False w = helper.create_parameter( attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) tmp = helper.create_variable_for_type_inference(dtype) padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( size[0] + padding_idx) helper.append_op( type='lookup_table', inputs={'Ids': input, 'W': w}, outputs={'Out': tmp}, attrs={ 'is_sparse': is_sparse, 'is_distributed': is_distributed, 'remote_prefetch': remote_prefetch, 'padding_idx': padding_idx }) return tmp def _pull_sparse(input, size, table_id, accessor_class, name="embedding", ctr_label_name="", padding_id=0, dtype='float32', scale_sparse_grad=True): 
helper = LayerHelper(name, **locals()) inputs = helper.multiple_input() outs = [helper.create_variable_for_type_inference(dtype)] input_names = [i.name for i in inputs] attrs = { 'EmbeddingDim': size, 'TableId': table_id, 'AccessorClass': accessor_class, 'CtrLabelName': ctr_label_name, 'PaddingId': padding_id, 'ScaleSparseGrad': scale_sparse_grad, 'InputNames': input_names, 'is_distributed': True } w, _ = helper.create_or_get_global_variable( name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True) helper.append_op( type='pull_sparse', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs=attrs) if len(outs) == 1: return outs[0] return outs def _pull_sparse_v2(input, size, table_id, accessor_class, name="embedding", ctr_label_name="", padding_id=0, dtype='float32', scale_sparse_grad=True): helper = LayerHelper(name, **locals()) inputs = helper.multiple_input() outs = [helper.create_variable_for_type_inference(dtype)] input_names = [i.name for i in inputs] attrs = { 'EmbeddingDim': size, 'TableId': table_id, 'AccessorClass': accessor_class, 'CtrLabelName': ctr_label_name, 'PaddingId': padding_id, 'ScaleSparseGrad': scale_sparse_grad, 'InputNames': input_names, 'is_distributed': True } w, _ = helper.create_or_get_global_variable( name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True) helper.append_op( type='pull_sparse_v2', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs=attrs) if len(outs) == 1: return outs[0] return outs def _pull_gpups_sparse(input, size, dtype='float32', is_distributed=False, is_sparse=False): helper = LayerHelper('pull_gpups_sparse', **locals()) if dtype != 'float32': raise ValueError( "GpuPS only support float type embedding now, and your type is: " + dtype) helper.input_dtype() inputs = helper.multiple_input() outs = [ helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] w = helper.create_parameter( attr=helper.param_attr, shape=[11], dtype=dtype, is_bias=False) 
helper.append_op( type='pull_gpups_sparse', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs={ 'size': size, 'is_distributed': is_distributed, 'is_sparse': is_sparse }) if len(outs) == 1: return outs[0] return outs def _pull_box_sparse(input, size, dtype='float32', is_distributed=False, is_sparse=False): helper = LayerHelper('pull_box_sparse', **locals()) if dtype != 'float32': raise ValueError( "BoxPS only support float type embedding now, and your type is: " + dtype) helper.input_dtype() inputs = helper.multiple_input() outs = [ helper.create_variable_for_type_inference(dtype) for i in range(len(inputs)) ] w = helper.create_parameter( attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False) helper.append_op( type='pull_box_sparse', inputs={'Ids': inputs, 'W': w}, outputs={'Out': outs}, attrs={ 'size': size, 'is_distributed': is_distributed, 'is_sparse': is_sparse }) if len(outs) == 1: return outs[0] return outs @templatedoc() def linear_chain_crf(input, label, param_attr=None, length=None): check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'linear_chain_crf') check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf') helper = LayerHelper('linear_chain_crf', **locals()) size = input.shape[2] if length else input.shape[1] transition = helper.create_parameter( attr=helper.param_attr, shape=[size + 2, size], dtype=helper.input_dtype()) alpha = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) emission_exps = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) transition_exps = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) log_likelihood = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) this_inputs = { "Emission": [input], "Transition": transition, "Label": [label] } if length: this_inputs['Length'] = [length] helper.append_op( type='linear_chain_crf', inputs=this_inputs, outputs={ "Alpha": [alpha], "EmissionExps": [emission_exps], 
"TransitionExps": transition_exps,
            "LogLikelihood": log_likelihood
        })

    return log_likelihood


@templatedoc()
def crf_decoding(input, param_attr, label=None, length=None):
    """
    Decode the best (Viterbi) tag path from CRF emission scores via the
    ``crf_decoding`` op.

    Args:
        input (Variable): Emission score tensor, float32 or float64.
        param_attr (ParamAttr): Attribute of the learned transition
            parameter; the parameter is looked up by ``param_attr.name``.
        label (Variable, optional): Ground-truth label tensor, forwarded to
            the op when given. Default None.
        length (Variable, optional): Sequence-length tensor for padded
            input. Default None.

    Returns:
        Variable: int64 tensor holding the decoded Viterbi path.
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'crf_decoding')
    helper = LayerHelper('crf_decoding', **locals())
    # The transition matrix is an existing parameter, fetched by name rather
    # than created here.
    transition = helper.get_parameter(param_attr.name)
    viterbi_path = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
    inputs = {"Emission": [input], "Transition": transition, "Label": label}
    if length:
        inputs['Length'] = length
    helper.append_op(
        type='crf_decoding',
        inputs=inputs,
        outputs={"ViterbiPath": [viterbi_path]})
    return viterbi_path


@templatedoc()
def cos_sim(X, Y):
    """
    Compute the cosine similarity of ``X`` and ``Y`` via the ``cos_sim`` op.

    Args:
        X (Variable): float32 input tensor.
        Y (Variable): float32 input tensor.

    Returns:
        Variable: Tensor holding the cosine similarity of ``X`` and ``Y``.
    """
    check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
    check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
    helper = LayerHelper('cos_sim', **locals())
    out = helper.create_variable_for_type_inference(dtype=X.dtype)
    # XNorm / YNorm are auxiliary outputs required by the op definition.
    xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
    ynorm = helper.create_variable_for_type_inference(dtype=X.dtype)
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X], 'Y': [Y]},
        outputs={'Out': [out],
                 'XNorm': [xnorm],
                 'YNorm': [ynorm]})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout")
def dropout(x,
            dropout_prob,
            is_test=None,
            seed=None,
            name=None,
            dropout_implementation="downgrade_in_infer"):
    """
    Append a ``dropout`` op that zeroes elements of ``x`` with probability
    ``dropout_prob``.

    Args:
        x (Variable): Input tensor, float16/float32/float64.
        dropout_prob (float): Probability of setting an element to zero.
            When 0, ``x`` is returned unchanged and no op is appended.
        is_test (bool, optional): Test-mode flag; in dygraph mode it is
            derived from the tracer's train mode when None.
        seed (int, optional): Random seed; when None/0, the program-level
            ``random_seed`` is used if it is non-zero.
        name (str, optional): Unused here apart from LayerHelper naming.
        dropout_implementation (str): Forwarded to the op as-is; default
            "downgrade_in_infer".

    Returns:
        Variable: The dropout output tensor (the mask is not returned).
    """
    if dropout_prob == 0:
        # Nothing would be dropped; skip appending the op entirely.
        return x
    if in_dygraph_mode():
        if (seed is None or
                seed == 0) and default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        if is_test is None:
            # Derive train/eval mode from the tracer when not given.
            is_test = not _dygraph_tracer()._train_mode
        out, mask = _C_ops.dropout(
            x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed',
            seed is not None, 'seed', seed
            if seed is not None else 0, 'dropout_implementation',
            dropout_implementation)
        return out

    def get_attrs(prog, dropout_prob, is_test, seed):
        # Fall back to the program-level random seed when none was supplied.
        if (seed is None or seed == 0) and prog.random_seed != 0:
            seed = prog.random_seed
        attrs = {
            'dropout_prob': dropout_prob,
            'is_test': is_test,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0,
            'dropout_implementation': dropout_implementation,
        }
        return attrs

    helper = LayerHelper('dropout', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'dropout')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    # Mask output is uint8 and excluded from gradient computation.
    mask = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

    attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)

    helper.append_op(
        type='dropout',
        inputs={'X': [x]},
        outputs={'Out': [out],
                 'Mask': [mask]},
        attrs=attrs)
    return out


@templatedoc()
def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               seq_length=None):
    """
    Append a ``chunk_eval`` op that scores chunk predictions against labels.

    Args:
        input (Variable): Predicted tags, int64.
        label (Variable): Ground-truth tags, int64.
        chunk_scheme (str): Chunking scheme name, forwarded to the op.
        num_chunk_types (int): Number of chunk types, forwarded to the op.
        excluded_chunk_types (list, optional): Chunk types to ignore;
            an empty list is passed when None.
        seq_length (Variable, optional): Sequence lengths for padded input.

    Returns:
        tuple: (precision, recall, f1_score, num_infer_chunks,
        num_label_chunks, num_correct_chunks) — the first three float32,
        the last three int64.
    """
    helper = LayerHelper("chunk_eval", **locals())
    check_variable_and_dtype(input, 'input', ['int64'], 'chunk_eval')
    check_variable_and_dtype(label, 'label', ['int64'], 'chunk_eval')

    # Output variables, one per metric produced by the op.
    precision = helper.create_variable_for_type_inference(dtype="float32")
    recall = helper.create_variable_for_type_inference(dtype="float32")
    f1_score = helper.create_variable_for_type_inference(dtype="float32")
    num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64")
    num_label_chunks = helper.create_variable_for_type_inference(dtype="int64")
    num_correct_chunks = helper.create_variable_for_type_inference(
        dtype="int64")

    this_input = {"Inference": [input], "Label": [label]}
    if seq_length is not None:
        this_input["SeqLength"] = [seq_length]

    helper.append_op(
        type="chunk_eval",
        inputs=this_input,
        outputs={
            "Precision": [precision],
            "Recall": [recall],
            "F1-Score": [f1_score],
            "NumInferChunks": [num_infer_chunks],
            "NumLabelChunks": [num_label_chunks],
            "NumCorrectChunks": [num_correct_chunks]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            "chunk_scheme": chunk_scheme,
            "excluded_chunk_types": excluded_chunk_types or []
        })
    return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
            num_correct_chunks)


@deprecated(since="2.0.0",
update_to="paddle.nn.functional.softmax") def softmax(input, use_cudnn=True, name=None, axis=-1): if in_dygraph_mode(): return _C_ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn) inputs = {"X": [input]} attrs = {"axis": axis, "use_cudnn": use_cudnn} helper = LayerHelper('softmax', **locals()) check_variable_and_dtype(input, 'input/x', ['float16', 'float32', 'float64'], 'softmax') dtype = helper.input_dtype() softmax_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="softmax", inputs={"X": input}, outputs={"Out": softmax_out}, attrs=attrs) return softmax_out def conv2d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW"): check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], 'conv2d') if len(input.shape) != 4: raise ValueError("Input size should be 4, " "but received {}".format(len(input.shape))) num_channels = input.shape[1] if not isinstance(use_cudnn, bool): raise ValueError("Attr(use_cudnn) should be True or False. Received " "Attr(use_cudnn): %s. " % str(use_cudnn)) if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " "Attr(data_format): %s." % str(data_format)) channel_last = (data_format == "NHWC") num_channels = input.shape[3] if channel_last else input.shape[1] if num_channels < 0: raise ValueError( "The channel dimmention of the input(%s) should be defined. " "Received: %s." % (str(input.shape), str(num_channels))) assert param_attr is not False, "param_attr should not be False here." 
if groups is None: num_filter_channels = num_channels elif groups <= 0: raise ValueError("the groups of input must be greater than 0, " "but received the groups of input is {}".format( groups)) else: if num_channels % groups != 0: raise ValueError( "the channel of input must be divisible by groups," "received: the channel of input is {}, the shape of input is {}" ", the groups is {}".format(num_channels, input.shape, groups)) num_filter_channels = num_channels // groups l_type = 'conv2d' if (num_channels == groups and num_filters % num_channels == 0 and not use_cudnn): l_type = 'depthwise_conv2d' if (num_channels == groups and num_filters % num_channels == 0 and core.is_compiled_with_rocm()): l_type = 'depthwise_conv2d' if core.is_compiled_with_npu(): if (num_channels == groups and num_channels == num_filters): l_type = 'depthwise_conv2d' else: l_type = 'conv2d' helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') stride = utils.convert_to_list(stride, 2, 'stride') dilation = utils.convert_to_list(dilation, 2, 'dilation') def _update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True return False if is_list_or_tuple(padding) and len(padding) == 4: if is_list_or_tuple(padding[0]) and (data_format == "NCHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." 
% str(padding)) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') if utils._is_symmetric_padding(padding, 2): padding = [padding[0], padding[2]] else: padding = utils.convert_to_list(padding, 2, 'padding') return padding padding_algorithm = "EXPLICIT" if isinstance(padding, str): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." % str(padding)) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0] elif padding == "SAME": padding_algorithm = "SAME" padding = [0, 0] padding = _update_padding(padding, data_format) filter_shape = [num_filters, int(num_filter_channels)] + filter_size def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * num_channels if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " "filter size.".format(filter_elem_num)) std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, default_initializer=_get_default_param_initializer()) pre_bias = helper.create_variable_for_type_inference(dtype) if (core.is_compiled_with_cuda() and paddle.fluid.get_flags( "FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]): use_cudnn = False helper.append_op( type=l_type, inputs={ 'Input': input, 'Filter': filter_param, }, outputs={"Output": pre_bias}, attrs={ 'strides': stride, 'paddings': padding, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, 'use_mkldnn': False, 'fuse_relu_before_depthwise_conv': False, "padding_algorithm": padding_algorithm, "data_format": data_format, }) if data_format == 'NCHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) else: pre_act = helper.append_bias_op(pre_bias, dim_start=3, 
dim_end=4) return helper.append_activation(pre_act) def conv3d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCDHW"): l_type = 'conv3d' assert param_attr is not False, "param_attr should not be False here." helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() if not isinstance(use_cudnn, bool): raise ValueError("Attr(use_cudnn) should be True or False. Received " "Attr(use_cudnn): %s. " % str(use_cudnn)) if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " "Attr(data_format): %s." % str(data_format)) channel_last = (data_format == "NDHWC") if len(input.shape) != 5: raise ValueError( "Input should be 5D tensor, but received input with the shape of {}". format(input.shape)) num_channels = input.shape[4] if channel_last else input.shape[1] if num_channels < 0: raise ValueError( "The channel dimmention of the input(%s) should be defined. " "Received: %s." % (str(input.shape), str(num_channels))) if groups is None: num_filter_channels = num_channels elif groups <= 0: raise ValueError( "the groups of conv3d should be greater than 0. Received groups: {}". format(groups)) else: if num_channels % groups != 0: raise ValueError( "The number of input channels must be divisible by Attr(groups). " "Received: number of channels(%s), groups(%s)." 
% (str(num_channels), str(groups))) num_filter_channels = num_channels // groups filter_size = utils.convert_to_list(filter_size, 3, 'filter_size') stride = utils.convert_to_list(stride, 3, 'stride') dilation = utils.convert_to_list(dilation, 3, 'dilation') def _update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True return False if is_list_or_tuple(padding) and len(padding) == 5: if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') if utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] elif is_list_or_tuple(padding) and len(padding) == 6: padding = utils.convert_to_list(padding, 6, 'padding') if utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] else: padding = utils.convert_to_list(padding, 3, 'padding') return padding padding_algorithm = "EXPLICIT" if isinstance(padding, str): padding = padding.upper() if padding not in ["SAME", "VALID"]: raise ValueError( "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." 
% str(padding)) if padding == "VALID": padding_algorithm = "VALID" padding = [0, 0, 0] elif padding == "SAME": padding_algorithm = "SAME" padding = [0, 0, 0] padding = _update_padding(padding, data_format) input_shape = input.shape filter_shape = [num_filters, num_filter_channels] + filter_size def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * filter_size[ 2] * num_channels if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " "filter size.".format(filter_elem_num)) std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, default_initializer=_get_default_param_initializer()) pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type=l_type, inputs={ 'Input': input, 'Filter': filter_param, }, outputs={"Output": pre_bias}, attrs={ 'strides': stride, 'paddings': padding, 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, 'use_mkldnn': False, "padding_algorithm": padding_algorithm, "data_format": data_format, }) if data_format == 'NCDHW': pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) else: pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5) return helper.append_activation(pre_act) @templatedoc() def pool2d(input, pool_size=-1, pool_type="max", pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCHW"): if pool_type not in ["max", "avg"]: raise ValueError( "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", str(pool_type)) if global_pooling is False and pool_size == -1: raise ValueError( "When Attr(global_pooling) is False, Attr(pool_size) must be passed " "and be a valid value. Received pool_size: %s." 
% str(pool_size)) if not isinstance(use_cudnn, bool): raise TypeError("Attr(use_cudnn) should be True or False. Received " "Attr(use_cudnn): %s." % str(use_cudnn)) if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " "Attr(data_format): %s." % str(data_format)) pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') def update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, list) or isinstance(ele, tuple): return True return False if is_list_or_tuple(padding) and len(padding) == 4: if is_list_or_tuple(padding[0]) and (data_format == "NCHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:4] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): if not (padding[0] == [0, 0] and padding[3] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[1:3] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 4, 'padding') if utils._is_symmetric_padding(padding, 2): padding = [padding[0], padding[2]] else: padding = utils.convert_to_list(padding, 2, 'padding') return padding padding_algorithm = "EXPLICIT" if isinstance(pool_padding, str): pool_padding = pool_padding.upper() if pool_padding not in ["SAME", "VALID"]: raise ValueError( "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." % str(pool_padding)) if pool_padding == "VALID": padding_algorithm = "VALID" pool_padding = [0, 0] if ceil_mode != False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. 
" "Received ceil_mode: True.") elif pool_padding == "SAME": padding_algorithm = "SAME" pool_padding = [0, 0] pool_padding = update_padding(pool_padding, data_format) op_type = 'pool2d' helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=op_type, inputs={"X": input}, outputs={"Out": pool_out}, attrs={ "pooling_type": pool_type, "ksize": pool_size, "global_pooling": global_pooling, "strides": pool_stride, "paddings": pool_padding, "padding_algorithm": padding_algorithm, "use_cudnn": use_cudnn, "ceil_mode": ceil_mode, "use_mkldnn": False, "exclusive": exclusive, "data_format": data_format, }) return pool_out @templatedoc() def pool3d(input, pool_size=-1, pool_type="max", pool_stride=1, pool_padding=0, global_pooling=False, use_cudnn=True, ceil_mode=False, name=None, exclusive=True, data_format="NCDHW"): if pool_type not in ["max", "avg"]: raise ValueError( "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", str(pool_type)) if global_pooling is False and pool_size == -1: raise ValueError( "When Attr(global_pooling) is False, Attr(pool_size) must be passed " "and be a valid value. Received Attr(pool_size): %s." % str(pool_size)) if not isinstance(use_cudnn, bool): raise TypeError("Attr(use_cudnn) should be True or False. Received " "Attr(use_cudnn): %s. " % str(use_cudnn)) if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. 
Received " "Attr(data_format): %s" % str(data_format)) pool_size = utils.convert_to_list(pool_size, 3, 'pool_size') pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride') def update_padding(padding, data_format): def is_list_or_tuple(ele): if isinstance(ele, (list, tuple)): return True return False if is_list_or_tuple(padding) and len(padding) == 5: if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"): if not (padding[0] == [0, 0] and padding[1] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[2:5] padding = [ele for a_list in padding for ele in a_list] elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"): if not (padding[0] == [0, 0] and padding[4] == [0, 0]): raise ValueError( "Non-zero pool_padding(%s) in the batch or channel dimensions " "is not supported." % str(padding)) padding = padding[1:4] padding = [ele for a_list in padding for ele in a_list] padding = utils.convert_to_list(padding, 6, 'padding') if utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] elif is_list_or_tuple(padding) and len(padding) == 6: padding = utils.convert_to_list(padding, 6, 'padding') if utils._is_symmetric_padding(padding, 3): padding = [padding[0], padding[2], padding[4]] else: padding = utils.convert_to_list(padding, 3, 'padding') return padding padding_algorithm = "EXPLICIT" if isinstance(pool_padding, str): pool_padding = pool_padding.upper() if pool_padding not in ["SAME", "VALID"]: raise ValueError( "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." % str(pool_padding)) if pool_padding == "VALID": padding_algorithm = "VALID" pool_padding = [0, 0, 0] if ceil_mode != False: raise ValueError( "When Attr(pool_padding) is \"VALID\", ceil_mode must be False. 
" "Received ceil_mode: True.") elif pool_padding == "SAME": padding_algorithm = "SAME" pool_padding = [0, 0, 0] pool_padding = update_padding(pool_padding, data_format) op_type = "pool3d" helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=op_type, inputs={"X": input}, outputs={"Out": pool_out}, attrs={ "pooling_type": pool_type, "ksize": pool_size, "global_pooling": global_pooling, "strides": pool_stride, "paddings": pool_padding, "padding_algorithm": padding_algorithm, "use_cudnn": use_cudnn, "ceil_mode": ceil_mode, "use_mkldnn": False, "exclusive": exclusive, "data_format": data_format, }) return pool_out @deprecated(since="2.0.0") @templatedoc(op_type="pool2d") def adaptive_pool2d(input, pool_size, pool_type="max", require_index=False, name=None): check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'adaptive_pool2d') check_type(pool_type, 'pool_type', str, 'adaptive_pool2d') check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d') check_type(require_index, 'require_index', bool, 'adaptive_pool2d') if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. 
It can only be 'max' or 'avg'.", str(pool_type)) if pool_type == "avg" and require_index: raise ValueError( "invalid setting 'require_index' true when 'pool_type' is 'avg'.") pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') if pool_type == "max": l_type = 'max_pool2d_with_index' else: l_type = "pool2d" helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) outputs = {"Out": pool_out} if pool_type == "max": mask = helper.create_variable_for_type_inference(dtype) outputs["Mask"] = mask helper.append_op( type=l_type, inputs={"X": input}, outputs=outputs, attrs={ "pooling_type": pool_type, "ksize": pool_size, "adaptive": True, }) return (pool_out, mask) if require_index else pool_out @deprecated(since="2.0.0") @templatedoc(op_type="pool3d") def adaptive_pool3d(input, pool_size, pool_type="max", require_index=False, name=None): check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'adaptive_pool3d') check_type(pool_type, 'pool_type', str, 'adaptive_pool3d') check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d') check_type(require_index, 'require_index', bool, 'adaptive_pool3d') if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. 
def batch_norm(input,
               act=None,
               is_test=False,
               momentum=0.9,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               data_layout='NCHW',
               in_place=False,
               name=None,
               moving_mean_name=None,
               moving_variance_name=None,
               do_model_average_for_mean_and_var=True,
               use_global_stats=False):
    """
    Append a ``batch_norm`` op: normalizes ``input`` over the batch using
    learnable scale/bias and running mean/variance parameters, then applies
    the optional activation ``act``.

    Args:
        input (Variable): input tensor; channel dim is axis 1 for 'NCHW' and
            the last axis for 'NHWC'.
        act (str|None): activation appended after normalization.
        is_test (bool): inference mode; also disables ReserveSpace output.
        momentum (float|Variable): running-statistics momentum; a Variable is
            fed as the op's 'MomemtumTensor' input instead of an attribute.
        epsilon (float): numerical-stability constant.
        param_attr / bias_attr: attributes for the scale/bias parameters.
        data_layout (str): 'NCHW' or 'NHWC'.
        in_place (bool): reuse ``input`` as the output variable.
        moving_mean_name / moving_variance_name (str|None): names for the
            running-statistics parameters (non-trainable).
        use_global_stats (bool): use global (running) statistics instead of
            per-mini-batch statistics.

    Returns:
        Variable: the normalized (and optionally activated) tensor.

    Raises:
        ValueError: for an unsupported ``data_layout``.
    """
    assert bias_attr is not False, "bias_attr should not be False in batch_norm."
    # NOTE: **locals() must be captured here, before any further locals are
    # introduced — do not add variables above this call.
    helper = LayerHelper('batch_norm', **locals())
    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'batch_norm')
    dtype = helper.input_dtype()

    # Parameters are kept in FP32 even for FP16 inputs.
    if dtype == core.VarDesc.VarType.FP16:
        dtype = core.VarDesc.VarType.FP32

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    # Running statistics: non-trainable parameters updated by the op itself.
    mean = helper.create_parameter(
        attr=ParamAttr(
            name=moving_mean_name,
            initializer=Constant(0.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    mean.stop_gradient = True

    variance = helper.create_parameter(
        attr=ParamAttr(
            name=moving_variance_name,
            initializer=Constant(1.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    variance.stop_gradient = True

    # mean/variance are updated in place by the op.
    mean_out = mean
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    reserve_space = None
    if not is_test:
        reserve_space = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype(), stop_gradient=True)

    batch_norm_out = input if in_place else \
        helper.create_variable_for_type_inference(dtype)

    inputs = {
        "X": input,
        "Scale": scale,
        "Bias": bias,
        "Mean": mean,
        "Variance": variance
    }
    attrs = {
        "epsilon": epsilon,
        "is_test": is_test,
        "data_layout": data_layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats
    }
    if isinstance(momentum, Variable):
        # NOTE(review): 'MomemtumTensor' spelling must match the op's
        # registered input name — do not "fix" it here.
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum

    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space

    helper.append_op(
        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)

    return helper.append_activation(batch_norm_out)
def inplace_abn(input,
                act=None,
                is_test=False,
                momentum=0.9,
                epsilon=1e-05,
                param_attr=None,
                bias_attr=None,
                data_layout='NCHW',
                name=None,
                moving_mean_name=None,
                moving_variance_name=None,
                do_model_average_for_mean_and_var=True,
                use_global_stats=False,
                act_alpha=1.0):
    """
    Append an ``inplace_abn`` op: in-place activated batch normalization.
    Same parameter layout as :func:`batch_norm`, but the activation is fused
    into the op (via the 'activation'/'alpha' attributes) and the output
    always reuses ``input``.

    Args:
        act (str|None): only None, 'identity', 'leaky_relu', 'elu' are
            supported by the fused op.
        act_alpha (float): slope/alpha for 'leaky_relu'/'elu'.
        (remaining arguments as in :func:`batch_norm`)

    Returns:
        Variable: the normalized, activated tensor (same variable as
        ``input``).
    """
    assert act in [None, 'identity', 'leaky_relu', 'elu'], \
        "inplace_abn only support act as None, 'identity', " \
        "'leaky_relu', 'elu' currently"
    assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
    # **locals() captured here — do not introduce locals above this call.
    helper = LayerHelper('inplace_abn', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'inplace_abn')
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    # Running statistics (non-trainable, updated in place by the op).
    mean = helper.create_parameter(
        attr=ParamAttr(
            name=moving_mean_name,
            initializer=Constant(0.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    mean.stop_gradient = True

    variance = helper.create_parameter(
        attr=ParamAttr(
            name=moving_variance_name,
            initializer=Constant(1.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    variance.stop_gradient = True

    mean_out = mean
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    reserve_space = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    # In-place: the op writes its result back into the input variable.
    batch_norm_out = input

    inputs = {
        "X": input,
        "Scale": scale,
        "Bias": bias,
        "Mean": mean,
        "Variance": variance
    }
    attrs = {
        "epsilon": epsilon,
        "is_test": is_test,
        "data_layout": data_layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats,
        "activation": act,
        "alpha": act_alpha,
    }
    if isinstance(momentum, Variable):
        # NOTE(review): 'MomemtumTensor' spelling must match the op's
        # registered input name — do not "fix" it here.
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum
    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    # reserve_space is always created above, so this branch always runs;
    # kept for symmetry with batch_norm.
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space
    helper.append_op(
        type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)

    return batch_norm_out
def instance_norm(input, epsilon=1e-05, param_attr=None, bias_attr=None,
                  name=None):
    """
    Append an ``instance_norm`` op: normalizes each sample over its spatial
    dimensions independently, with optional learnable scale/bias.

    Args:
        input (Variable): 2-D to 5-D tensor of dtype float32/float64; the
            channel dimension is axis 1.
        epsilon (float): numerical-stability constant.
        param_attr / bias_attr: attributes for the scale/bias parameters;
            pass ``False`` for both to disable them entirely.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the normalized tensor.

    Raises:
        ValueError: if the input rank is outside [2, 5].
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'instance_norm')
    if param_attr is False:
        # BUG FIX: corrected "Fasle" -> "False" in the assertion message.
        assert bias_attr is False, "param_attr and bias_attr must be set to False at the same time in instance_norm"
    helper = LayerHelper('instance_norm', **locals())
    dtype = helper.input_dtype()

    # Parameters are kept in FP32 even for FP16 inputs.
    if dtype == core.VarDesc.VarType.FP16:
        dtype = core.VarDesc.VarType.FP32

    input_shape = input.shape
    if len(input.shape) < 2 or len(input.shape) > 5:
        raise ValueError(
            'expected 2D or 3D or 4D or 5D input (got {}D input, input shape is: {})'.
            format(len(input.shape), input_shape))
    channel_num = input_shape[1]

    param_shape = [channel_num]

    # '!=' (not 'is not') is kept deliberately: param_attr may be a
    # ParamAttr instance, and only an explicit False disables the params.
    if param_attr != False and bias_attr != False:
        # create parameter
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        bias = helper.create_parameter(
            attr=helper.bias_attr,
            shape=param_shape,
            dtype=dtype,
            is_bias=True,
            default_initializer=Constant(0.0))

    # create output
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    instance_norm_out = helper.create_variable_for_type_inference(dtype)

    inputs = {"X": input}
    if param_attr != False and bias_attr != False:
        inputs["Scale"] = scale
        inputs["Bias"] = bias

    helper.append_op(
        type="instance_norm",
        inputs=inputs,
        outputs={
            "Y": instance_norm_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"epsilon": epsilon, })

    return instance_norm_out
@static_only
def data_norm(input,
              act=None,
              epsilon=1e-05,
              param_attr=None,
              data_layout='NCHW',
              in_place=False,
              name=None,
              moving_mean_name=None,
              moving_variance_name=None,
              do_model_average_for_mean_and_var=True,
              slot_dim=-1,
              sync_stats=False,
              summary_decay_rate=0.9999999,
              enable_scale_and_shift=False):
    """
    Append a ``data_norm`` op: normalizes ``input`` using accumulated batch
    statistics (BatchSize / BatchSum / BatchSquareSum parameters) rather
    than per-mini-batch moments, then applies the optional activation.

    Args:
        input (Variable): input tensor; channel dim is axis 1 for 'NCHW'
            and the last axis for 'NHWC'.
        act (str|None): activation appended after normalization.
        epsilon (float): numerical-stability constant.
        param_attr (dict|None): may carry initial values under the keys
            "batch_size", "batch_sum", "batch_square" and — when
            ``enable_scale_and_shift`` — "scale_w", "bias".
        data_layout (str): 'NCHW' or 'NHWC'.
        in_place (bool): reuse ``input`` as the output variable.
        name (str|None): parameter-name prefix; defaults to "dn".
        slot_dim (int): passed through as the op's 'slot_dim' attr when > 0.
        sync_stats (bool): synchronize statistics across trainers.
        summary_decay_rate (float): decay rate for the summary statistics.
        enable_scale_and_shift (bool): add learnable scale_w/bias inputs.
            NOTE(review): this path reads param_attr.get(...) and will fail
            if param_attr is not a dict — confirm callers always pass one.

    Returns:
        Variable: the normalized (and optionally activated) tensor.

    Raises:
        ValueError: for an unsupported ``data_layout``.
    """
    helper = LayerHelper('data_norm', **locals())
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    batch_size_default = 1e4
    batch_sum_default = 0.0
    batch_square_sum_default = 1e4
    scale_w_default = 1.0
    bias_default = 0.0

    if param_attr and isinstance(param_attr, dict):
        batch_size_default = param_attr.get("batch_size", 1e4)
        batch_sum_default = param_attr.get("batch_sum", 0.0)
        batch_square_sum_default = param_attr.get("batch_square", 1e4)
    if enable_scale_and_shift:
        scale_w_default = param_attr.get("scale_w", 1.0)
        bias_default = param_attr.get("bias", 0.0)

    # BUG FIX (idiom): identity comparison with None instead of '=='.
    if name is None:
        name = "dn"
    if enable_scale_and_shift:
        scale_w = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.scale_w',
                initializer=Constant(value=float(scale_w_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
        bias = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.bias',
                initializer=Constant(value=float(bias_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
    # create parameter
    batch_size = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_size',
            initializer=Constant(value=float(batch_size_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_sum',
            initializer=Constant(value=float(batch_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_square_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_square_sum',
            initializer=Constant(value=float(batch_square_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    means = helper.create_variable(dtype=dtype, stop_gradient=True)
    scales = helper.create_variable(dtype=dtype, stop_gradient=True)

    data_norm_out = input if in_place else helper.create_variable(dtype=dtype)

    inputs = {
        "X": input,
        "BatchSize": batch_size,
        "BatchSum": batch_sum,
        "BatchSquareSum": batch_square_sum
    }
    attrs = {
        "epsilon": epsilon,
        "data_layout": data_layout,
        "sync_stats": sync_stats,
        "summary_decay_rate": summary_decay_rate,
    }
    if slot_dim > 0:
        attrs["slot_dim"] = slot_dim
    if enable_scale_and_shift:
        attrs["enable_scale_and_shift"] = enable_scale_and_shift
    if enable_scale_and_shift:
        inputs["scale_w"] = scale_w
        inputs["bias"] = bias
    helper.append_op(
        type="data_norm",
        inputs=inputs,
        outputs={
            "Y": data_norm_out,
            "Means": means,
            "Scales": scales,
            "BatchSize": batch_size,
            "BatchSum": batch_sum,
            "BatchSquareSum": batch_square_sum
        },
        attrs=attrs)

    return helper.append_activation(data_norm_out)
@templatedoc()
def layer_norm(input,
               scale=True,
               shift=True,
               begin_norm_axis=1,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               name=None):
    """
    Append a ``layer_norm`` op: normalizes ``input`` over all dimensions
    from ``begin_norm_axis`` onward, with optional learnable scale/shift,
    then applies the optional activation.

    Args:
        input (Variable): input tensor of dtype float32/float64.
        scale (bool): learn a per-element scale parameter.
        shift (bool): learn a per-element bias parameter.
        begin_norm_axis (int): first axis of the normalized span.
        epsilon (float): numerical-stability constant.
        param_attr / bias_attr: attributes for the scale/bias parameters;
            ignored (with a warning) when scale/shift is False.
        act (str|None): activation appended after normalization.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the normalized (and optionally activated) tensor.
    """
    assert in_dygraph_mode(
    ) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
    # **locals() captured here — do not introduce locals above this call.
    helper = LayerHelper('layer_norm', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'layer_norm')
    dtype = helper.input_dtype()

    # create intput and parameters
    inputs = {'X': input}
    input_shape = input.shape
    # Flattened size of the normalized span — scale/bias have this shape.
    param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])]
    if scale:
        assert param_attr is not False, "param_attr should not be False when using scale."
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        inputs['Scale'] = scale
    else:
        if param_attr:
            warnings.warn("param_attr is only available with scale is True.")
    if shift:
        assert bias_attr is not False, "bias_attr should not be False when using shift."
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
        inputs['Bias'] = bias
    else:
        if bias_attr:
            warnings.warn("bias_attr is only available with shift is True.")

    # create output
    mean_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    layer_norm_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type="layer_norm",
        inputs=inputs,
        outputs={
            "Y": layer_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={"epsilon": epsilon,
               "begin_norm_axis": begin_norm_axis})

    return helper.append_activation(layer_norm_out)
@templatedoc()
def group_norm(input,
               groups,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               data_layout='NCHW',
               name=None):
    """
    Append a ``group_norm`` op: normalizes channels in ``groups`` groups,
    with optional per-channel scale/bias, then applies the optional
    activation.

    Args:
        input (Variable): tensor of rank >= 2, dtype float32/float64.
        groups (int): number of channel groups.
        epsilon (float): numerical-stability constant.
        param_attr / bias_attr: attributes for the scale/bias parameters;
            falsy values disable the corresponding parameter.
        act (str|None): activation appended after normalization.
        data_layout (str): 'NCHW' or 'NHWC'.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the normalized (and optionally activated) tensor.

    Raises:
        ValueError: for rank < 2 input or an unsupported ``data_layout``.
    """
    # **locals() captured here — do not introduce locals above this call.
    helper = LayerHelper('group_norm', **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'group_norm')
    # create intput and parameters
    inputs = {'X': input}
    input_shape = input.shape
    if len(input_shape) < 2:
        raise ValueError(
            f"The dimensions of Op(fluid.layers.group_norm)'s input should be more than 1. But received {len(input_shape)}"
        )
    if data_layout != 'NCHW' and data_layout != 'NHWC':
        raise ValueError(
            "Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
            + data_layout + " but only NCHW or NHWC supported.")
    channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
    param_shape = [channel_num]
    if param_attr:
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        inputs['Scale'] = scale
    if bias_attr:
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
        inputs['Bias'] = bias

    # create output
    mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    group_norm_out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="group_norm",
        inputs=inputs,
        outputs={
            "Y": group_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={
            "epsilon": epsilon,
            "groups": groups,
            "data_layout": data_layout
        })

    return helper.append_activation(group_norm_out)
@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
    """
    Append a ``spectral_norm`` op: rescales ``weight`` by its largest
    singular value, estimated by ``power_iters`` power iterations over the
    auxiliary (non-trainable) vectors U and V.

    Args:
        weight (Variable): weight tensor of dtype float32/float64.
        dim (int): axis treated as the "height" of the reshaped matrix;
            the remaining axes are flattened into the "width".
        power_iters (int): number of power iterations per forward pass.
        eps (float): numerical-stability constant.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the spectrally-normalized weight.
    """
    # **locals() captured here — do not introduce locals above this call.
    helper = LayerHelper('spectral_norm', **locals())
    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                             'spectral_norm')
    check_type(dim, 'dim', int, 'spectral_norm')
    check_type(power_iters, 'power_iters', int, 'spectral_norm')
    check_type(eps, 'eps', float, 'spectral_norm')
    dtype = weight.dtype

    # create input and parameters
    inputs = {'Weight': weight}
    input_shape = weight.shape
    assert weight.numel() > 0, "Any dimension of input cannot be equal to 0."
    assert dim < len(input_shape), ("The input `dim` should be less than the "
                                    "rank of `weight`, but received dim="
                                    "{}".format(dim))
    # Reshape view: h x w matrix with h = size of axis `dim`.
    h = input_shape[dim]
    w = np.prod(input_shape) // h

    u = helper.create_parameter(
        attr=ParamAttr(),
        shape=[h],
        dtype=dtype,
        default_initializer=Normal(0., 1.))
    u.stop_gradient = True
    inputs['U'] = u
    v = helper.create_parameter(
        attr=ParamAttr(),
        shape=[w],
        dtype=dtype,
        default_initializer=Normal(0., 1.))
    inputs['V'] = v
    v.stop_gradient = True

    # create output
    out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="spectral_norm",
        inputs=inputs,
        outputs={"Out": out, },
        attrs={
            "dim": dim,
            "power_iters": power_iters,
            "eps": eps,
        })

    return out
def conv2d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=0,
                     stride=1,
                     dilation=1,
                     groups=None,
                     param_attr=None,
                     bias_attr=None,
                     use_cudnn=True,
                     act=None,
                     name=None,
                     data_format='NCHW'):
    """
    Append a 2-D transposed-convolution (deconvolution) op.

    Either ``filter_size`` or ``output_size`` must be given; when
    ``filter_size`` is None it is inferred from ``output_size``, stride,
    padding and dilation. Padding may be an int, a 2/4-element list, a
    pairs-per-axis list, or the strings "SAME"/"VALID".

    Args:
        input (Variable): 4-D input tensor ('NCHW' or 'NHWC').
        num_filters (int): number of output channels.
        output_size (int|list|tuple|None): target spatial output size.
        filter_size (int|list|tuple|None): kernel size [H, W].
        padding, stride, dilation: per-axis conv hyper-parameters.
        groups (int|None): group count; None means 1. When it equals both
            the input channels and num_filters (and cuDNN is off), the
            depthwise op variant is used.
        param_attr / bias_attr: attributes for the filter/bias parameters.
        use_cudnn (bool): use the cuDNN kernel.
        act (str|None): activation appended after bias.
        data_format (str): 'NCHW' or 'NHWC'.

    Returns:
        Variable: the transposed-convolution output.

    Raises:
        ValueError / TypeError: for invalid ranks, layouts, padding forms,
            or non-positive ``groups``.
    """
    assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
    if len(input.shape) != 4:
        raise ValueError("Input size should be 4, "
                         "but received {}".format(len(input.shape)))
    if data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received "
            + data_format + " but only NCHW or NHWC supported.")

    input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
    op_type = 'conv2d_transpose'
    # Depthwise variant: every input channel convolved independently.
    if (input_channel == groups and num_filters == input_channel and
            not use_cudnn):
        op_type = 'depthwise_conv2d_transpose'

    helper = LayerHelper(op_type, **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv2d_transpose must be Variable")

    stride = utils.convert_to_list(stride, 2, 'stride')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    if not isinstance(use_cudnn, bool):
        raise ValueError("use_cudnn should be True or False")

    def _update_padding(padding, data_format):
        # Normalize the many accepted padding forms into the 4-element
        # [top, bottom, left, right] list the op expects.
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')
        else:
            padding = utils.convert_to_list(padding, 2, 'padding')
            padding = [padding[0], padding[0], padding[1], padding[1]]
        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0, 0]

    padding = _update_padding(padding, data_format)

    if filter_size is None:
        # Infer the kernel size from the requested output size.
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size]

        h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
        w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]

        filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] +
                         padding[1] - 1) // dilation[0] + 1
        filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] +
                         padding[3] - 1) // dilation[1] + 1
        filter_size = [filter_size_h, filter_size_w]
    else:
        filter_size = utils.convert_to_list(filter_size, 2,
                                            'conv2d_transpose.filter_size')

    # Symmetric padding collapses to the 2-element per-axis form.
    if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
        padding = [padding[0], padding[2]]

    if output_size is None:
        output_size = []
    elif isinstance(output_size, (list, tuple, int)):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        raise ValueError("output_size should be int, list[int] or tuple[int]")

    if groups is None:
        groups = 1
    elif groups <= 0:
        raise ValueError("the groups of input must be greater than 0, "
                         "but received the groups of input is {}".format(
                             groups))

    filter_shape = [input_channel, num_filters // groups] + filter_size

    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=op_type,
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': pre_bias},
        attrs={
            'output_size': output_size,
            'strides': stride,
            'paddings': padding,
            'padding_algorithm': padding_algorithm,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'data_format': data_format
        })

    # Bias is broadcast along the channel axis of the chosen layout.
    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
    out = helper.append_activation(pre_act)
    return out
def conv3d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=0,
                     stride=1,
                     dilation=1,
                     groups=None,
                     param_attr=None,
                     bias_attr=None,
                     use_cudnn=True,
                     act=None,
                     name=None,
                     data_format='NCDHW'):
    """
    Append a 3-D transposed-convolution (deconvolution) op.

    Mirrors :func:`conv2d_transpose` for 5-D ('NCDHW'/'NDHWC') inputs:
    either ``filter_size`` or ``output_size`` must be given; padding may be
    an int, a 3/6-element list, a pairs-per-axis list, or "SAME"/"VALID".

    Args:
        input (Variable): 5-D input tensor.
        num_filters (int): number of output channels; must be divisible by
            ``groups``.
        output_size (int|list|tuple|None): target spatial output size.
        filter_size (int|list|tuple|None): kernel size [D, H, W].
        padding, stride, dilation: per-axis conv hyper-parameters.
        groups (int|None): group count; None means 1.
        param_attr / bias_attr: attributes for the filter/bias parameters.
        use_cudnn (bool): use the cuDNN kernel.
        act (str|None): activation appended after bias.
        data_format (str): 'NCDHW' or 'NDHWC'.

    Returns:
        Variable: the transposed-convolution output.

    Raises:
        ValueError / TypeError: for invalid ranks, layouts, padding forms,
            non-positive ``groups``, or indivisible ``num_filters``.
    """
    assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
    if data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
            + data_format + " but only NCDHW or NDHWC supported.")

    l_type = "conv3d_transpose"
    helper = LayerHelper(l_type, **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv3d_transpose must be Variable")
    if len(input.shape) != 5:
        raise ValueError(
            "Input should be 5D tensor, but received input with the shape of {}".
            format(input.shape))
    input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
        -1]

    stride = utils.convert_to_list(stride, 3, 'stride')
    dilation = utils.convert_to_list(dilation, 3, 'dilation')

    if not isinstance(use_cudnn, bool):
        raise ValueError("use_cudnn should be True or False")

    def _update_padding(padding, data_format):
        # Normalize the accepted padding forms into the 6-element
        # [front, back, top, bottom, left, right] list the op expects.
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')

        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
        else:
            padding = utils.convert_to_list(padding, 3, 'padding')
            padding = [
                padding[0], padding[0], padding[1], padding[1], padding[2],
                padding[2]
            ]
        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0, 0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0, 0, 0, 0]

    padding = _update_padding(padding, data_format)

    if filter_size is None:
        # Infer the kernel size from the requested output size.
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size, output_size]

        d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
        h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
        w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]

        filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] +
                         padding[1] - 1) // dilation[0] + 1
        filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] +
                         padding[3] - 1) // dilation[1] + 1
        filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] +
                         padding[5] - 1) // dilation[2] + 1
        filter_size = [filter_size_d, filter_size_h, filter_size_w]
    else:
        filter_size = utils.convert_to_list(filter_size, 3,
                                            'conv3d_transpose.filter_size')

    # Symmetric padding collapses to the 3-element per-axis form.
    if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
        padding = [padding[0], padding[2], padding[4]]

    if output_size is None:
        output_size = []
    elif isinstance(output_size, (list, tuple, int)):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        raise ValueError("output_size should be int, list[int] or tuple[int]")

    groups = 1 if groups is None else groups
    if groups <= 0:
        raise ValueError(
            "the groups of conv3d_transpose should be greater than 0. Received groups: {}".
            format(groups))
    if num_filters % groups != 0:
        raise ValueError("Attr(num_filters) must be divisible by groups,"
                         "Received: Attr(num_filters) is {}, the groups is {}".
                         format(num_filters, groups))

    filter_shape = [input_channel, num_filters // groups] + filter_size
    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

    # The underlying op only understands the 2-D layout names.
    if data_format == 'NCDHW':
        data_format = 'NCHW'
    if data_format == 'NDHWC':
        data_format = 'NHWC'

    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=l_type,
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': pre_bias},
        attrs={
            'output_size': output_size,
            'strides': stride,
            'paddings': padding,
            'padding_algorithm': padding_algorithm,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'data_format': data_format
        })

    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
    out = helper.append_activation(pre_act)
    return out
def reduce_sum(input, dim=None, keep_dim=False, name=None):
    """
    Sum the elements of ``input`` along dimension(s) ``dim``.

    Args:
        input (Variable): tensor of dtype float16/float32/float64/int32/
            int64.
        dim (int|list|None): axes to reduce; None or [] reduces all axes.
        keep_dim (bool): keep the reduced axes with size 1 in the output.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the reduced tensor.
    """
    if dim is not None and not isinstance(dim, list):
        dim = [dim]

    if in_dygraph_mode():
        # IDIOM FIX: identity comparisons with None (PEP 8); behavior
        # unchanged. The op requires a concrete 'dim' attr, so [0] is
        # passed as a dummy when reducing all axes.
        reduce_all = True if dim is None or dim == [] or len(dim) == len(
            input.shape) else False
        dim = dim if dim is not None and dim != [] else [0]
        return _C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                 'reduce_all', reduce_all)
    attrs = {
        'dim': dim if dim is not None and dim != [] else [0],
        'keep_dim': keep_dim,
        'reduce_all': True
        if dim is None or dim == [] or len(dim) == len(input.shape) else False
    }
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'reduce_sum')
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out
@deprecated(since="2.0.0", update_to="paddle.mean")
def reduce_mean(input, dim=None, keep_dim=False, name=None):
    """Deprecated thin wrapper: delegates to :func:`paddle.mean`."""
    return paddle.mean(x=input, axis=dim, keepdim=keep_dim, name=name)


def reduce_max(input, dim=None, keep_dim=False, name=None):
    """
    Maximum of ``input`` along dimension(s) ``dim``.

    Args:
        input (Variable): input tensor.
        dim (int|list|None): axes to reduce; None or [] reduces all axes.
        keep_dim (bool): keep the reduced axes with size 1 in the output.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the reduced tensor.
    """
    helper = LayerHelper('reduce_max', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_max',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            # IDIOM FIX: identity comparisons with None; behavior unchanged.
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True
            if dim is None or dim == [] or len(dim) == len(input.shape) else
            False
        })
    return out


def reduce_min(input, dim=None, keep_dim=False, name=None):
    """
    Minimum of ``input`` along dimension(s) ``dim``.

    Args/Returns: same as :func:`reduce_max`.
    """
    helper = LayerHelper('reduce_min', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_min',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True
            if dim is None or dim == [] or len(dim) == len(input.shape) else
            False
        })
    return out


def reduce_prod(input, dim=None, keep_dim=False, name=None):
    """
    Product of ``input`` elements along dimension(s) ``dim``.

    Args:
        input (Variable): tensor of dtype float32/float64/int32/int64.
        dim (int|list|tuple|None): axes to reduce; None or [] reduces all.
        keep_dim (bool): keep the reduced axes with size 1 in the output.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the reduced tensor.

    Raises:
        TypeError: for an unsupported ``dim`` type.
    """
    # NOTE: helper is created before dim normalization (as in the rest of
    # this family); the op attrs below use the normalized value.
    helper = LayerHelper('reduce_prod', **locals())
    if dim is not None and not isinstance(dim, list):
        if isinstance(dim, tuple):
            dim = list(dim)
        elif isinstance(dim, int):
            dim = [dim]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".
                format(type(dim)))
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'],
        'reduce_prod')
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_prod',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True
            if dim is None or dim == [] or len(dim) == len(input.shape) else
            False
        })
    return out
def reduce_all(input, dim=None, keep_dim=False, name=None):
    """
    Logical AND of boolean ``input`` along dimension(s) ``dim``.

    Args:
        input (Variable): boolean tensor.
        dim (int|list|None): axes to reduce; None or [] reduces all axes.
        keep_dim (bool): keep the reduced axes with size 1 in the output.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the reduced boolean tensor.
    """
    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
    helper = LayerHelper('reduce_all', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_all',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            # IDIOM FIX: identity comparisons with None; behavior unchanged.
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True
            if dim is None or dim == [] or len(dim) == len(input.shape) else
            False
        })
    return out


def reduce_any(input, dim=None, keep_dim=False, name=None):
    """
    Logical OR of boolean ``input`` along dimension(s) ``dim``.

    Args/Returns: same as :func:`reduce_all`.
    """
    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_any')
    helper = LayerHelper('reduce_any', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_any',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True
            if dim is None or dim == [] or len(dim) == len(input.shape) else
            False
        })
    return out
def split(input, num_or_sections, dim=-1, name=None):
    """
    Split ``input`` into multiple tensors along axis ``dim``.

    Args:
        input (Variable): tensor of dtype bool/float16/float32/float64/
            int32/int64.
        num_or_sections (int|list|tuple): an int N splits into N equal
            parts; a list/tuple gives each part's size (one entry may be -1
            to be inferred; entries may be Variables).
        dim (int|Variable): split axis; negative values count from the end.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        list[Variable]: the split tensors.

    Raises:
        TypeError: for an unsupported ``num_or_sections`` type (dygraph).
    """
    if in_dygraph_mode():
        num = None
        attrs = ()

        if isinstance(dim, Variable):
            dim = dim.numpy()
            dim = dim.item(0)
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input.shape) + dim) if dim < 0 else dim
        attrs += ('axis', dim)

        if isinstance(num_or_sections, int):
            num = num_or_sections
            attrs += ('num', num_or_sections)
        elif isinstance(num_or_sections, (list, tuple)):
            num = len(num_or_sections)
            if utils._contain_var(num_or_sections):
                # NOTE(review): this mutates the caller's list in place,
                # replacing Variables with their concrete values.
                for index, item in enumerate(num_or_sections):
                    if isinstance(item, Variable):
                        num_or_sections[index] = num_or_sections[index].numpy()[
                            0]
                attrs += ('sections', list(num_or_sections))
            else:
                attrs += ('sections', list(num_or_sections))
        else:
            raise TypeError(
                "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                "received %s." % (type(num_or_sections)))
        return _C_ops.split(input, num, *attrs)

    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'split')
    check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
    check_type(dim, 'dim', (int, Variable), 'split')
    if isinstance(dim, Variable):
        check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')

    helper = LayerHelper('split', **locals())

    input_shape = input.shape
    inputs = {'X': input}
    attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}

    def _get_SectionsTensorList(one_list):
        # Convert a mixed list of ints/Variables into a list of 1-element
        # int32 tensors for the op's SectionsTensorList input.
        tensor_list = []
        unk_dim_idx = -1
        for idx, dim_size in enumerate(one_list):
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                tensor_list.append(dim_size)
            else:
                assert (isinstance(dim_size, int))
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one value of 'num_or_section' in split can "
                        "be -1. But received num_or_section[%d] is also -1." %
                        idx)
                    unk_dim_idx = idx
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                tensor_list.append(temp_out)
        return tensor_list

    if isinstance(dim, Variable):
        dim.stop_gradient = True
        inputs['AxisTensor'] = dim
    else:
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input_shape) + dim) if dim < 0 else dim
        attrs['axis'] = dim

    if isinstance(num_or_sections, int):
        assert num_or_sections > 1, 'num_or_sections must be more than 1.'
        if isinstance(dim, int) and input_shape[dim] > 0:
            # BUG FIX: the '%d' arguments were swapped — the dividend is
            # the input size, the divisor is num_or_sections.
            assert input_shape[dim] % num_or_sections == 0, \
                "The input's size along the split dimension " \
                "must be evenly divisible by Attr(num_or_sections). " \
                "But %d is not evenly divisible by %d. " % (input_shape[dim], num_or_sections)
        num = num_or_sections
    else:
        if isinstance(dim, int) and input_shape[dim] > 0:
            assert len(num_or_sections) <= input_shape[
                dim], 'len(num_or_sections) must not be more than input.shape[dim].'
        num = len(num_or_sections)
        attrs['sections'] = list(
            map(lambda ele: -1 if isinstance(ele, Variable) else ele,
                num_or_sections))
        if utils._contain_var(num_or_sections):
            inputs['SectionsTensorList'] = _get_SectionsTensorList(
                num_or_sections)

    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]
    helper.append_op(
        type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
    return outs
def l2_normalize(x, axis, epsilon=1e-12, name=None):
    """
    L2-normalize ``x`` along ``axis`` via the ``norm`` op
    (out = x / max(||x||_2, epsilon) along the axis).

    Args:
        x (Variable): tensor of dtype float16/float32/float64.
        axis (int|None): normalization axis; None is treated as axis 1, and
            a 1-D input always uses axis 0.
        epsilon (float): lower bound on the norm.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the normalized tensor.
    """
    if len(x.shape) == 1:
        axis = 0
    if in_dygraph_mode():
        _, out = _C_ops.norm(x, 'axis', 1 if axis is None else axis,
                             'epsilon', epsilon)
        return out

    check_variable_and_dtype(x, "X", ("float16", "float32", "float64"), "norm")

    helper = LayerHelper("l2_normalize", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    # The op also emits the per-slice norms; only 'out' is returned.
    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="norm",
        inputs={"X": x},
        outputs={"Out": out,
                 "Norm": norm},
        attrs={
            "axis": 1 if axis is None else axis,
            "epsilon": epsilon,
        })
    return out


@deprecated(since="2.0.0", update_to="paddle.matmul")
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
    """
    Deprecated matrix multiply: out = alpha * (x' @ y'), where x'/y' are
    optionally transposed on their last two axes.

    Args:
        x, y (Variable): tensors of dtype float16/float32/float64; 1-D
            inputs are promoted to row/column vectors for shape checking.
        transpose_x, transpose_y (bool): transpose the last two axes.
        alpha (float): scalar multiplier applied to the product.
        name (str|None): kept for API compatibility; not used here.

    Returns:
        Variable: the product tensor.

    Raises:
        ValueError: when batch (leading) dimensions are statically known
            and unequal.
    """
    if in_dygraph_mode():
        out = _varbase_creator(dtype=x.dtype)
        _C_ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
                      transpose_y, 'alpha', float(alpha))
        return out

    def __check_input(x, y):
        # Static shape validation; -1 (unknown) dimensions are skipped.
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(
                val, name, ['float16', 'float32', 'float64'], 'matmul')
        x_shape = list(x.shape)
        y_shape = list(y.shape)
        if len(x_shape) == 1:
            x_shape = [1] + x_shape
        if len(y_shape) == 1:
            y_shape = y_shape + [1]

        if transpose_x:
            x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
        if transpose_y:
            y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]

        if x_shape[-1] != y_shape[-2]:
            assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
                "After performing an optional transpose, Input X's width should be " \
                "equal to Y's width for multiplication " \
                "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
                (x_shape, y_shape)

        if len(y_shape) > 2 and len(x_shape) > 2:
            for i, dim_x in enumerate(x_shape[:-2]):
                # don't check neg shape
                if dim_x < 0 or y_shape[i] < 0:
                    continue
                if dim_x != y_shape[i]:
                    raise ValueError(
                        "When the matrix is larger than 2 dimensions, the higher "
                        "dimensional values of the two matrices need to be equal. "
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))

    attrs = {
        'transpose_X': transpose_x,
        'transpose_Y': transpose_y,
        'alpha': float(alpha),
    }

    __check_input(x, y)

    helper = LayerHelper('matmul', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out
'matmul') x_shape = list(x.shape) y_shape = list(y.shape) if len(x_shape) == 1: x_shape = [1] + x_shape if len(y_shape) == 1: y_shape = y_shape + [1] if transpose_x: x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2] if transpose_y: y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2] if x_shape[-1] != y_shape[-2]: assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \ "After performing an optional transpose, Input X's width should be " \ "equal to Y's width for multiplication " \ "prerequisites. But received X's shape: %s, Y's shape: %s\n" % \ (x_shape, y_shape) if len(y_shape) > 2 and len(x_shape) > 2: for i, dim_x in enumerate(x_shape[:-2]): if dim_x < 0 or y_shape[i] < 0: continue if dim_x != y_shape[i]: raise ValueError( "When the matrix is larger than 2 dimensions, the higher " "dimensional values of the two matrices need to be equal. " "But received x_shape[%d] != y_shape[%d]. X's shape: %s, " "Y's shape: %s.\n" % (i, i, x_shape, y_shape)) attrs = { 'transpose_X': transpose_x, 'transpose_Y': transpose_y, 'alpha': float(alpha), } __check_input(x, y) helper = LayerHelper('matmul', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def topk(input, k, name=None): if in_dygraph_mode(): _k = k.numpy().item(0) if isinstance(k, Variable) else k out, indices = _C_ops.top_k(input, 'k', _k) out.stop_gradient = True indices.stop_gradient = True return out, indices inputs = {"X": [input]} attrs = {} if isinstance(k, Variable): inputs['K'] = [k] else: attrs = {'k': k} helper = LayerHelper("top_k", **locals()) values = helper.create_variable_for_type_inference(dtype=input.dtype) indices = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="top_k", inputs=inputs, outputs={"Out": [values], "Indices": [indices]}, attrs=attrs) values.stop_gradient = True indices.stop_gradient = True return values, indices def 
ctc_greedy_decoder(input, blank, input_length=None, padding_value=0, name=None): check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'ctc_greedy_decoder') helper = LayerHelper("ctc_greedy_decoder", **locals()) _, topk_indices = topk(input, k=1) # ctc align op ctc_out = helper.create_variable_for_type_inference(dtype="int64") if input_length is None: helper.append_op( type="ctc_align", inputs={"Input": [topk_indices]}, outputs={"Output": [ctc_out]}, attrs={"merge_repeated": True, "blank": blank}) return ctc_out else: ctc_out_len = helper.create_variable_for_type_inference(dtype="int64") ctc_input = squeeze(topk_indices, [2]) helper.append_op( type="ctc_align", inputs={"Input": [ctc_input], "InputLength": [input_length]}, outputs={"Output": [ctc_out], "OutputLength": [ctc_out_len]}, attrs={ "merge_repeated": True, "blank": blank, "padding_value": padding_value }) return ctc_out, ctc_out_len def transpose(x, perm, name=None): if in_dygraph_mode(): out, _ = _C_ops.transpose2(x, 'axis', perm) return out check_variable_and_dtype( x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') check_type(perm, 'perm', (list, tuple), 'transpose') if isinstance(perm, tuple): perm = list(perm) if len(perm) != len(x.shape): raise ValueError( "Input(perm) is the permutation of dimensions of Input(x), " "its length should be equal to dimensions of Input(x), " "but received dimension of Input(x) is %s, " "the length of Input(perm) is %s." % (len(x.shape), len(perm))) for idx, dim in enumerate(perm): if dim >= len(x.shape): raise ValueError( "Each element in Input(perm) should be less than Input(x)'s dimension, " "but %d-th element in Input(perm) is %d which exceeds Input(x)'s " "dimension %d." 
% (idx, perm[idx], len(x.shape))) helper = LayerHelper('transpose', **locals()) out = helper.create_variable_for_type_inference(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype) helper.append_op( type='transpose2', inputs={'X': [x]}, outputs={'Out': [out], 'XShape': [x_shape]}, attrs={'axis': perm}) return out def im2sequence(input, filter_size=1, stride=1, padding=0, input_image_size=None, out_stride=1, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence') if isinstance(filter_size, int): filter_size = [filter_size, filter_size] if isinstance(stride, int): stride = [stride, stride] if isinstance(padding, int): padding = [padding, padding] if len(padding) == 2: padding.append(padding[0]) padding.append(padding[1]) inputs = {"X": input} attrs = {"kernels": filter_size, "strides": stride, "paddings": padding} if input_image_size: if isinstance(out_stride, int): out_stride = [out_stride, out_stride] inputs["Y"] = input_image_size attrs["out_stride"] = out_stride helper = LayerHelper('im2sequence', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) helper.append_op( type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out @templatedoc() def row_conv(input, future_context_size, param_attr=None, act=None): helper = LayerHelper('row_conv', **locals()) check_variable_and_dtype(input, 'input', ['float32'], 'row_conv') dtype = helper.input_dtype() filter_shape = [future_context_size + 1, input.shape[-1]] filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype) out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='row_conv', inputs={'X': [input], 'Filter': [filter_param]}, outputs={'Out': [out]}) return helper.append_activation(out) @templatedoc() def multiplex(inputs, index, name=None): if in_dygraph_mode(): return 
_C_ops.multiplex(index, inputs) helper = LayerHelper('multiplex', **locals()) check_type(inputs, 'inputs', (list), 'multiplex') if len(inputs) < 2: raise ValueError( "inputs should be a list object with at least 2 elements.") for id, x in enumerate(inputs): check_variable_and_dtype(x, 'input[' + str(id) + ']', ['float32', 'float64', 'int32', 'int64'], 'multiplex') check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex') out = helper.create_variable_for_type_inference(inputs[0].dtype) helper.append_op( type='multiplex', inputs={'X': inputs, 'Ids': index}, outputs={'Out': [out]}) return out def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss') check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss') helper = LayerHelper('smooth_l1_loss', **locals()) diff = helper.create_variable_for_type_inference(dtype=x.dtype) loss = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='smooth_l1_loss', inputs={ 'X': x, 'Y': y, 'InsideWeight': inside_weight, 'OutsideWeight': outside_weight }, outputs={'Diff': diff, 'Out': loss}, attrs={'sigma': sigma if sigma is not None else 1.0}) return loss @deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot') def one_hot(input, depth, allow_out_of_range=False): if in_dygraph_mode(): if isinstance(depth, Variable): depth = depth.numpy() assert depth.shape == ( 1, ), "depth of type Variable should have shape [1]" depth = depth.item(0) out = _C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range', allow_out_of_range) out.stop_gradient = True return out helper = LayerHelper("one_hot", **locals()) check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot') check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot') one_hot_out = helper.create_variable_for_type_inference(dtype='float32') if not isinstance(depth, Variable): # user attribute inputs = {'X': 
input} attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range} else: depth.stop_gradient = True inputs = {'X': input, 'depth_tensor': depth} attrs = {'allow_out_of_range': allow_out_of_range} helper.append_op( type="one_hot", inputs=inputs, attrs=attrs, outputs={'Out': one_hot_out}) one_hot_out.stop_gradient = True return one_hot_out def autoincreased_step_counter(counter_name=None, begin=1, step=1): helper = LayerHelper('global_step_counter') if counter_name is None: counter_name = '@STEP_COUNTER@' counter, is_new_var = helper.create_or_get_global_variable( name=counter_name, dtype='int64', shape=[1], persistable=True, belong_to_optimizer=True) if is_new_var: helper.set_variable_initializer( counter, initializer=Constant( value=begin - 1, force_cpu=True)) helper.main_program.global_block()._prepend_op( type='increment', inputs={'X': [counter]}, outputs={'Out': [counter]}, attrs={'step': float(step)}) counter.stop_gradient = True return counter def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): if in_dygraph_mode(): #TODO(zhiqiu): enable inplace in dygraph mode. if inplace: warnings.warn( "Inplace on reshape is not allowed and will be discarded in dygraph mode currently." 
) if isinstance(shape, (list, tuple)): shape = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in shape ] out, _ = _C_ops.reshape2(x, None, 'shape', shape) elif isinstance(shape, Variable): shape.stop_gradient = True out, _ = _C_ops.reshape2(x, shape) else: raise ValueError( "shape must be an instance of `list`, `tuple` or `Variable`," " got '{}.'".format(type(shape))) return dygraph_utils._append_activation_in_dygraph(out, act) check_variable_and_dtype(x, 'x', [ 'float16', 'float32', 'float64', 'int32', 'int64', 'bool', 'uint16' ], 'reshape') check_type(shape, 'shape', (list, tuple, Variable), 'reshape') check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape') helper = LayerHelper("reshape2", **locals()) def get_attr_shape(list_shape): unk_dim_idx = -1 attrs_shape = [] for dim_idx, dim_size in enumerate(list_shape): if isinstance(dim_size, Variable): attrs_shape.append(-1) else: attrs_shape.append(dim_size) if dim_size == -1: assert unk_dim_idx == -1, ( "Only one dimension value of 'shape' in reshape can " "be -1. But received shape[%d] is also -1." % dim_idx) unk_dim_idx = dim_idx elif dim_size == 0: assert dim_idx < len(x.shape), ( "The index of 0 in `shape` must be less than " "the input tensor X's dimensions. " "But received shape[%d] = 0, X's dimensions = %d." % (dim_idx, len(x.shape))) else: assert dim_size > 0, ( "Each dimension value of 'shape' in reshape must not " "be negative except one unknown dimension. " "But received shape[%d] = %s." % (dim_idx, str(dim_size))) return attrs_shape inputs = {"X": x} attrs = {} if isinstance(shape, Variable): shape.stop_gradient = True inputs["Shape"] = shape elif isinstance(shape, (list, tuple)): assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, " "but received %s." 
% len(shape)) attrs["shape"] = get_attr_shape(shape) if utils._contain_var(shape): inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape) elif isinstance(actual_shape, Variable): actual_shape.stop_gradient = True inputs["Shape"] = actual_shape out = x if inplace else helper.create_variable_for_type_inference( dtype=x.dtype) x_shape = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="reshape2", inputs=inputs, attrs=attrs, outputs={"Out": out, "XShape": x_shape}) return helper.append_activation(out) def squeeze(input, axes, name=None): if in_dygraph_mode(): out, _ = _C_ops.squeeze2(input, 'axes', axes) return out helper = LayerHelper("squeeze", **locals()) check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'], 'squeeze') check_type(axes, 'axis/axes', (list, tuple), 'squeeze') out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type="squeeze2", inputs={"X": input}, attrs={"axes": axes}, outputs={"Out": out, "XShape": x_shape}) return out def unsqueeze(input, axes, name=None): if in_dygraph_mode(): if isinstance(axes, int): axes = [axes] elif isinstance(axes, Variable): axes = axes.numpy().tolist() elif isinstance(axes, (list, tuple)): axes = [ item.numpy().item(0) if isinstance(item, Variable) else item for item in axes ] out, _ = _C_ops.unsqueeze2(input, 'axes', axes) return out check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'], 'unsqueeze') helper = LayerHelper("unsqueeze2", **locals()) inputs = {"X": input} attrs = {} if isinstance(axes, int): axes = [axes] if isinstance(axes, Variable): axes.stop_gradient = True inputs["AxesTensor"] = axes elif isinstance(axes, (list, tuple)): if utils._contain_var(axes): inputs["AxesTensorList"] = 
utils._convert_to_tensor_list(axes) else: attrs["axes"] = axes out = helper.create_variable_for_type_inference(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type="unsqueeze2", inputs=inputs, attrs=attrs, outputs={"Out": out, "XShape": x_shape}) return out def lod_reset(x, y=None, target_lod=None): check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_reset') helper = LayerHelper("lod_reset", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) if y is not None: check_type(y, 'y', (Variable), 'lod_reset') helper.append_op( type="lod_reset", inputs={'X': x, 'Y': y}, outputs={'Out': out}) elif target_lod is not None: helper.append_op( type="lod_reset", inputs={'X': x}, attrs={'target_lod': target_lod}, outputs={'Out': out}) else: raise ValueError("y and target_lod should not be both none.") return out def lod_append(x, level): from collections import Iterable if x is None: raise ValueError("Input(x) can't be None.") if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)): raise ValueError("Input(level) must be list, tuple or Variable.") check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'lod_append') helper = LayerHelper("lod_append", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) inputs = {'X': x} attrs = {'append': True} if isinstance(level, Variable): inputs['Y'] = level #TODO: check y.lod_level = 0 dtype else: attrs['target_lod'] = level helper.append_op( type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out}) return out def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, data_format='NCHW'): helper = LayerHelper('lrn', **locals()) check_variable_and_dtype(input, 'input', ['float32'], 'lrn') dtype = helper.input_dtype() input_shape = input.shape dims = len(input_shape) if dims != 4: raise ValueError( "Input's dimension size of Op(lrn) must be 4, but received %d." 
% (dims)) if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) of Op(lrn) got wrong value: received " + data_format + " but only NCHW or NHWC supported.") mid_out = helper.create_variable_for_type_inference( dtype=dtype, stop_gradient=True) lrn_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="lrn", inputs={"X": input}, outputs={ "Out": lrn_out, "MidOut": mid_out, }, attrs={ "n": n, "k": k, "alpha": alpha, "beta": beta, "data_format": data_format }) return lrn_out def pad(x, paddings, pad_value=0., name=None): check_variable_and_dtype(x, 'x', [ 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64', 'complex128' ], "pad") helper = LayerHelper('pad', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad', inputs={'X': x}, outputs={'Out': out}, attrs={'paddings': paddings, 'pad_value': float(pad_value)}) return out def pad_constant_like(x, y, pad_value=0., name=None): check_type(x, 'x', (Variable), 'pad_constant_like') check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], "pad_constant_like") helper = LayerHelper('pad_constant_like', **locals()) dtype = helper.input_dtype(input_param_name='y') out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad_constant_like', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'pad_value': float(pad_value)}) return out def label_smooth(label, prior_dist=None, epsilon=0.1, dtype="float32", name=None): if epsilon > 1. 
or epsilon < 0.: raise ValueError("The value of epsilon must be between 0 and 1.") if in_dygraph_mode(): return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon)) check_variable_and_dtype(label, 'label', ['float32', 'float64'], 'label_smooth') helper = LayerHelper("label_smooth", **locals()) label.stop_gradient = True smooth_label = helper.create_variable_for_type_inference(dtype) helper.append_op( type="label_smooth", inputs={"X": label, "PriorDist": prior_dist} if prior_dist else {"X": label}, outputs={"Out": smooth_label}, attrs={"epsilon": float(epsilon)}) return smooth_label @templatedoc() def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, rois_num=None, name=None): if in_dygraph_mode(): assert rois_num is not None, "rois_num should not be None in dygraph mode." pool_out, argmaxes = _C_ops.roi_pool( input, rois, rois_num, "pooled_height", pooled_height, "pooled_width", pooled_width, "spatial_scale", spatial_scale) return pool_out, argmaxes check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool') check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool') helper = LayerHelper('roi_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_variable_for_type_inference(dtype) argmaxes = helper.create_variable_for_type_inference(dtype='int32') inputs = { "X": input, "ROIs": rois, } if rois_num is not None: inputs['RoisNum'] = rois_num helper.append_op( type="roi_pool", inputs=inputs, outputs={"Out": pool_out, "Argmax": argmaxes}, attrs={ "pooled_height": pooled_height, "pooled_width": pooled_width, "spatial_scale": spatial_scale }) return pool_out @templatedoc() def roi_align(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0, sampling_ratio=-1, rois_num=None, name=None): if in_dygraph_mode(): assert rois_num is not None, "rois_num should not be None in dygraph mode." 
        # Imperative (dygraph) fast path: invoke the C++ roi_align op directly.
        align_out = _C_ops.roi_align(
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale,
            "sampling_ratio", sampling_ratio)
        return align_out

    # Static-graph path: validate dtypes, then append a roi_align op.
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'roi_align')
    check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align')
    helper = LayerHelper('roi_align', **locals())
    dtype = helper.input_dtype()
    align_out = helper.create_variable_for_type_inference(dtype)
    inputs = {
        "X": input,
        "ROIs": rois,
    }
    if rois_num is not None:
        # Optional tensor holding the RoI count per image in the batch.
        inputs['RoisNum'] = rois_num
    helper.append_op(
        type="roi_align",
        inputs=inputs,
        outputs={"Out": align_out},
        attrs={
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale,
            "sampling_ratio": sampling_ratio
        })
    return align_out


def dice_loss(input, label, epsilon=0.00001, name=None):
    """Compute the Dice loss between ``input`` probabilities and ``label``.

    The asserts below validate dtypes, matching ranks, and that ``label``
    carries a trailing singleton class-index dimension; the actual loss
    computation follows.
    """
    # Probabilities must be floating point; class indices must be integral.
    assert input.dtype in (paddle.float32, paddle.float64)
    assert label.dtype in (paddle.int32, paddle.int64)
    assert len(input.shape) >= 2, \
        "The rank of input should be greater than or equal to 2."
    assert len(input.shape) == len(label.shape), (
        "The rank of input and label should be equal, "
        "but received input: %d, label: %d." %
        (len(input.shape), len(label.shape)))
    # label's last dim is the (single) class index per element.
    assert label.shape[-1] == 1, ("The last dimension of label should be 1, "
                                  "but received %d." % label.shape[-1])
    assert input.shape[:-1] == label.shape[:-1], (
        "All dimensions should be equal except the last one.")
    assert input.numel() > 0 and label.numel() > 0, \
        "Any dimension of input and label cannot be equal to 0."
label = squeeze(label, [-1]) label = paddle.nn.functional.one_hot(label, input.shape[-1]) reduce_dim = list(range(1, len(input.shape))) inse = reduce_sum(input * label, dim=reduce_dim) dice_denominator = reduce_sum( input, dim=reduce_dim) + reduce_sum( label, dim=reduce_dim) dice_score = 1 - inse * 2 / (dice_denominator + epsilon) return reduce_mean(dice_score) def image_resize(input, out_shape=None, scale=None, name=None, resample='BILINEAR', actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW'): resample_methods = { 'LINEAR': 'linear', 'BILINEAR': 'bilinear', 'TRILINEAR': 'trilinear', 'NEAREST': 'nearest', 'LINEAR': 'linear', } resample = resample.upper() if resample not in resample_methods: raise ValueError( "The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR' " "or 'NEAREST' currently.") resample_type = resample_methods[resample] if resample == 'LINEAR' and len(input.shape) != 3: raise ValueError("'LINER only support 3-D tensor.") elif resample in ['BILINEAR', 'NEAREST'] and len(input.shape) != 4: raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.") elif resample == 'TRILINEAR' and len(input.shape) != 5: raise ValueError("'TRILINEAR'only support 5-D tensor.") if not isinstance(align_corners, bool): raise TypeError("Attr align_corners should be a bool value") if align_mode != 0 and align_mode != 1: raise ValueError("align_mode can only be 0 or 1") if out_shape is None and scale is None: raise ValueError("One of out_shape and scale must not be None.") helper = LayerHelper('{}_interp'.format(resample_type), **locals()) dtype = helper.input_dtype() if len(input.shape) == 3 and data_format not in ['NCW', 'NWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCW` or `NWC` supported for 3-D input.") elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received 
but only `NCHW` or `NHWC` supported for 4-D input.") elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Got wrong value for param `data_format`: " + data_format + " received but only `NCDHW` or `NDHWC` supported for 5-D input.") def _is_list_or_turple_(data): return (isinstance(data, list) or isinstance(data, tuple)) if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW': data_layout = 'NCHW' if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC': data_layout = 'NHWC' inputs = {"X": input} attrs = { "out_d": -1, "out_h": -1, "out_w": -1, "interp_method": resample_type, "align_corners": align_corners, "align_mode": align_mode, "data_layout": data_layout } if out_shape is not None: if isinstance(out_shape, Variable): out_shape.stop_gradient = True inputs['OutSize'] = out_shape else: if not (_is_list_or_turple_(out_shape)): raise TypeError( "out_shape should be a list or tuple or Variable.") # Validate the shape contain_var = False for dim_idx, dim_size in enumerate(out_shape): if isinstance(dim_size, Variable): contain_var = True continue assert dim_size > 0, ( "Each dimension size given in out_shape must be greater than 0." 
) if contain_var: new_size_tensor = [] size_list = [] for dim in out_shape: if isinstance(dim, Variable): dim.stop_gradient = True new_size_tensor.append(dim) size_list.append(-1) else: assert (isinstance(dim, int)) temp_out = helper.create_variable_for_type_inference( 'int32') fill_constant( [1], 'int32', dim, force_cpu=True, out=temp_out) new_size_tensor.append(temp_out) size_list.append(dim) inputs['SizeTensor'] = new_size_tensor if len(input.shape) == 3: if len(out_shape) != 1: raise ValueError("out_shape length should be 1 for " "input 3-D tensor.") if contain_var: attrs['out_w'] = size_list[0] else: out_shape = list(map(int, out_shape)) attrs['out_w'] = out_shape[0] elif len(input.shape) == 4: if len(out_shape) != 2: raise ValueError("out_shape length should be 2 for " "input 4-D tensor.") if contain_var: attrs['out_h'] = size_list[0] attrs['out_w'] = size_list[1] else: out_shape = list(map(int, out_shape)) attrs['out_h'] = out_shape[0] attrs['out_w'] = out_shape[1] if len(input.shape) == 5: if len(out_shape) != 3: raise ValueError("out_shape length should be 3 for " "input 5-D tensor.") if contain_var: attrs['out_d'] = size_list[0] attrs['out_h'] = size_list[1] attrs['out_w'] = size_list[2] else: out_shape = list(map(int, out_shape)) attrs['out_d'] = out_shape[0] attrs['out_h'] = out_shape[1] attrs['out_w'] = out_shape[2] else: if isinstance(scale, Variable): scale.stop_gradient = True inputs["Scale"] = scale elif isinstance(scale, float) or isinstance(scale, int): if scale <= 0: raise ValueError("Attr(scale) should be greater than zero.") attrs['scale'] = float(scale) else: raise TypeError( "Attr(scale)'s type should be float, int or Variable.") if isinstance(actual_shape, Variable): warnings.warn( "actual_shape will be deprecated, it is recommended to use " "out_shape instead of actual_shape to specify output shape dynamically." 
) actual_shape.stop_gradient = True inputs["OutSize"] = actual_shape elif actual_shape is not None: raise TypeError("actual_shape should either be Variable or None.") out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='{}_interp'.format(resample_type), inputs=inputs, outputs={"Out": out}, attrs=attrs) return out @templatedoc(op_type="linear_interp") def resize_linear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCW'): return image_resize(input, out_shape, scale, name, 'LINEAR', actual_shape, align_corners, align_mode, data_format) @templatedoc(op_type="bilinear_interp") def resize_bilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCHW'): return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape, align_corners, align_mode, data_format) @templatedoc(op_type="trilinear_interp") def resize_trilinear(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, align_mode=1, data_format='NCDHW'): return image_resize(input, out_shape, scale, name, 'TRILINEAR', actual_shape, align_corners, align_mode, data_format) @templatedoc(op_type="nearest_interp") def resize_nearest(input, out_shape=None, scale=None, name=None, actual_shape=None, align_corners=True, data_format='NCHW'): return image_resize( input, out_shape, scale, name, 'NEAREST', actual_shape, align_corners, align_mode=1, data_format=data_format) def image_resize_short(input, out_short_len, resample='BILINEAR'): in_shape = input.shape if len(in_shape) != 4: raise ValueError( "The rank of input must be 4 (num_batches, channels, in_h, in_w).") hw = in_shape[2:4] short_idx = hw.index(min(hw)) long_idx = 1 - short_idx out_shape = list(hw) out_shape[short_idx] = out_short_len out_shape[long_idx] = int( float(out_shape[long_idx]) * (float(out_short_len) / float(hw[ short_idx])) + 0.5) return image_resize(input=input, 
out_shape=out_shape, resample=resample) @deprecated(since="2.0.0", update_to="paddle.gather") def gather(input, index, overwrite=True): if in_dygraph_mode(): return _C_ops.gather(input, index, None, 'overwrite', overwrite) check_variable_and_dtype( input, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'gather') check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather') helper = LayerHelper('gather', **locals()) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="gather", inputs={"X": input, "Index": index}, outputs={"Out": out}, attrs={'overwrite': overwrite}) return out @deprecated(since="2.0.0", update_to="paddle.gather_nd") def gather_nd(input, index, name=None): if in_dygraph_mode(): return _C_ops.gather_nd(input, index) check_variable_and_dtype(input, 'input', ['bool', 'float32', 'float64', 'int32', 'int64'], 'gather_np') check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np') helper = LayerHelper('gather_nd', **locals()) dtype = helper.input_dtype() output = helper.create_variable_for_type_inference(dtype) helper.append_op( type="gather_nd", inputs={"X": input, "Index": index}, outputs={"Out": output}) return output @deprecated(since="2.0.0", update_to="paddle.scatter") def scatter(input, index, updates, name=None, overwrite=True): helper = LayerHelper('scatter', **locals()) dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="scatter", inputs={"X": input, "Ids": index, "Updates": updates}, attrs={'overwrite': overwrite}, outputs={"Out": out}) return out def scatter_nd_add(ref, index, updates, name=None): if in_dygraph_mode(): op = getattr(_C_ops, 'scatter_nd_add') return op(ref, index, updates) if ref.dtype != updates.dtype: raise ValueError("ref and updates must have same data type.") helper = LayerHelper('scatter_nd_add', **locals()) dtype = helper.input_dtype(input_param_name='ref') output = 
helper.create_variable_for_type_inference(dtype) helper.append_op( type="scatter_nd_add", inputs={"X": ref, "Index": index, "Updates": updates}, outputs={"Out": output}) return output def scatter_nd(index, updates, shape, name=None): return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name) @templatedoc() def random_crop(x, shape, seed=None): helper = LayerHelper("random_crop", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32'], 'random_crop') check_type(shape, 'shape', (list, Variable), 'random_crop') dtype = x.dtype out = helper.create_variable_for_type_inference(dtype) if seed is None: seed = np.random.randint(-65536, 65536) op_attrs = {"shape": shape} if isinstance(seed, int): op_attrs["startup_seed"] = seed seed = helper.create_variable( name=unique_name.generate("random_crop_seed"), dtype="int64", persistable=True) elif not isinstance(seed, Variable): raise ValueError("'seed' must be a Variable or an int.") helper.append_op( type="random_crop", inputs={"X": x, "Seed": seed}, outputs={"Out": out, "SeedOut": seed}, attrs=op_attrs) return out def log(x, name=None): if in_dygraph_mode(): return _C_ops.log(x) check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log") inputs = {'X': [x]} helper = LayerHelper('log', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out}) return out @deprecated(since="2.0.0", update_to="paddle.nn.functional.relu") def relu(x, name=None): if in_dygraph_mode(): return _C_ops.relu(x) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu') inputs = {'X': [x]} helper = LayerHelper('relu', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out}) return out @deprecated(since="2.0.0", 
update_to="paddle.nn.functional.selu") def selu(x, scale=None, alpha=None, name=None): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu') helper = LayerHelper('selu', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) attrs = {} if scale is not None: attrs["scale"] = scale if alpha is not None: attrs["alpha"] = alpha helper.append_op( type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs) return out def mean_iou(input, label, num_classes): if in_dygraph_mode(): return _C_ops.mean_iou(input, label, 'num_classes', num_classes) helper = LayerHelper('mean_iou', **locals()) check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'], 'mean_iou') check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou') out_mean_iou = helper.create_variable_for_type_inference(dtype='float32') out_wrong = helper.create_variable_for_type_inference(dtype='int32') out_correct = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type="mean_iou", inputs={"Predictions": input, "Labels": label}, outputs={ "OutMeanIou": out_mean_iou, "OutWrong": out_wrong, "OutCorrect": out_correct }, attrs={"num_classes": num_classes}) return out_mean_iou, out_wrong, out_correct def crop(x, shape=None, offsets=None, name=None): check_variable_and_dtype(x, 'x', ['float32'], 'crop') check_type(shape, 'shape', (list, tuple, Variable), 'crop') helper = LayerHelper('crop', **locals()) if offsets is None: offsets = [0] * len(x.shape) out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x} attrs = {} if isinstance(shape, Variable): ipts['Y'] = shape else: attrs['shape'] = shape if isinstance(offsets, Variable): ipts['Offsets'] = offsets else: attrs['offsets'] = offsets helper.append_op( type='crop', inputs=ipts, outputs={'Out': out}, attrs=None if len(attrs) == 0 else attrs) return out def crop_tensor(x, shape=None, offsets=None, name=None): helper = 
    LayerHelper('crop_tensor', **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'crop_tensor')
    check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
    check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
               'crop_tensor')
    if offsets is None:
        offsets = [0] * len(x.shape)
    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x}
    attrs = {}

    def _attr_shape_check(shape_val):
        # Validate a single static shape entry: int, nonzero, and >= -1.
        if not isinstance(shape_val, int):
            raise TypeError(
                "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(shape_val))
        if shape_val == 0:
            raise ValueError(
                "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
                % str(shape_val))
        if shape_val < -1:
            raise ValueError(
                "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
                % str(shape_val))

    def _attr_offsets_check(offset_val):
        # Validate a single static offset entry: int and non-negative.
        if not isinstance(offset_val, int):
            raise TypeError(
                "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(offset_val))
        if offset_val < 0:
            raise ValueError(
                "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
                % str(offset_val))

    # offsets: whole-Variable, mixed list (ints + Variables), or pure ints.
    if isinstance(offsets, Variable):
        offsets.stop_gradient = True
        ipts['Offsets'] = offsets
        attrs['offsets'] = [-1] * len(x.shape)
    elif utils._contain_var(offsets):
        new_offsets_tensor = []
        offsets_attr = []
        for dim in offsets:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_offsets_tensor.append(dim)
                offsets_attr.append(-1)
            else:
                _attr_offsets_check(dim)
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_offsets_tensor.append(temp_out)
                offsets_attr.append(dim)
        ipts['OffsetsTensor'] = new_offsets_tensor
        attrs['offsets'] = offsets_attr
    else:
        for offset in offsets:
            _attr_offsets_check(offset)
        attrs['offsets'] = offsets
    # shape: same three-way handling as offsets (0 marks a Variable entry).
    if isinstance(shape, Variable):
        shape.stop_gradient = True
        ipts['Shape'] = shape
    elif utils._contain_var(shape):
        new_shape_tensor = []
        shape_attr = []
        for dim_size in shape:
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                new_shape_tensor.append(dim_size)
                shape_attr.append(0)
            else:
                _attr_shape_check(dim_size)
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                new_shape_tensor.append(temp_out)
                shape_attr.append(dim_size)
        ipts['ShapeTensor'] = new_shape_tensor
        attrs['shape'] = shape_attr
    else:
        for dim_size in shape:
            _attr_shape_check(dim_size)
        attrs['shape'] = shape
    helper.append_op(
        type='crop_tensor',
        inputs=ipts,
        outputs={'Out': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out


def affine_grid(theta, out_shape, name=None):
    """Append an ``affine_grid`` op generating a sampling grid from ``theta``."""
    helper = LayerHelper('affine_grid')
    check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
                             'affine_grid')
    if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \
            isinstance(out_shape, Variable)):
        raise ValueError("The out_shape should be a list, tuple or Variable.")
    if not isinstance(theta, Variable):
        raise ValueError("The theta should be a Variable.")
    out = 
    helper.create_variable_for_type_inference(theta.dtype)
    ipts = {'Theta': theta}
    attrs = {}
    if isinstance(out_shape, Variable):
        ipts['OutputShape'] = out_shape
        check_variable_and_dtype(out_shape, 'out_shape', ['int32'],
                                 'affine_grid')
    else:
        attrs['output_shape'] = out_shape
    # cuDNN path is disabled on ROCm builds.
    if core.is_compiled_with_rocm():
        attrs['use_cudnn'] = False
    helper.append_op(
        type='affine_grid',
        inputs=ipts,
        outputs={'Output': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out


def pad2d(input,
          paddings=[0, 0, 0, 0],
          mode='constant',
          pad_value=0.0,
          data_format="NCHW",
          name=None):
    """Append a ``pad2d`` op padding the H/W dims of a 4-D tensor.

    ``paddings`` may be a 4-element list or a Variable; ``mode`` must be one
    of 'constant', 'reflect', 'edge'.
    """
    if in_dygraph_mode():
        _paddings = paddings.numpy().tolist() if isinstance(
            paddings, Variable) else paddings
        return _C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
                            'data_format', data_format, 'paddings', _paddings)
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        "pad2d")
    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
    inputs = {'X': [input]}
    if isinstance(paddings, Variable):
        inputs['Paddings'] = [paddings]
        attrs['paddings'] = []
    else:
        attrs['paddings'] = paddings
    helper = LayerHelper('pad2d', **locals())
    assert mode in ['reflect', 'edge', 'constant'
                    ], "mode should be one of constant, reflect, edge."
    dtype = helper.input_dtype(input_param_name='input')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.elu")
def elu(x, alpha=1.0, name=None):
    """Append an ``elu`` op with attribute ``alpha``."""
    helper = LayerHelper('elu', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='elu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': alpha})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
def relu6(x, threshold=6.0, name=None):
    """Append a ``relu6`` op; MKL-DNN use follows the global flag."""
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'relu6')
    helper = LayerHelper('relu6', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='relu6',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={
            'threshold': threshold,
            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"]
        })
    return out


@templatedoc()
def pow(x, factor=1.0, name=None):
    """Append a ``pow`` op; ``factor`` may be a float attr or a float32 Variable."""
    check_variable_and_dtype(
        x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow')
    helper = LayerHelper('pow', **locals())
    inputs = {'X': x}
    attrs = {}
    if isinstance(factor, Variable):
        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
        factor.stop_gradient = True
        inputs['FactorTensor'] = factor
    else:
        attrs['factor'] = factor
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


@templatedoc()
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
    """Append a ``stanh`` (scaled tanh) op with attrs ``scale_a``/``scale_b``."""
    if in_dygraph_mode():
        return _C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'stanh')
    helper = LayerHelper('stanh', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='stanh',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'scale_a': scale_a,
               'scale_b': scale_b})
    return out


@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
    """Append a ``hard_sigmoid`` op with attrs ``slope``/``offset``."""
    if in_dygraph_mode():
        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_sigmoid')
    helper = LayerHelper('hard_sigmoid', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='hard_sigmoid',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': slope,
               'offset': offset})
    return out


@templatedoc()
def swish(x, beta=1.0, name=None):
    """Append a ``swish`` op.

    NOTE(review): ``beta`` is passed under the op attribute name 'slope' —
    verify against the swish op's registered attribute name before changing.
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'swish')
    helper = LayerHelper('swish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='swish',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': beta})
    return out


@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
    """Append a ``prelu`` op.

    ``mode`` selects the alpha parameter shape: 'all' (one scalar),
    'channel' (one per channel, layout-dependent), or 'element'
    (one per element, batch dim excluded).
    """
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'prelu')
    helper = LayerHelper('prelu', **locals())
    if mode not in ['all', 'channel', 'element']:
        raise ValueError('mode should be one of all, channel, element.')
    alpha_shape = [1]
    if mode == 'channel':
        true_data_format = [
            'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC'
        ]
        if data_format not in true_data_format:
            raise ValueError(
                "data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
                "'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format))
        # Collapse all channel-first / channel-last layouts to NCHW / NHWC.
        data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'
        assert len(
            x.shape
        ) >= 2, "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'"
        if data_format == 'NHWC':
            alpha_shape = [1, 1, 1, x.shape[-1]]
        else:
            alpha_shape = [1, x.shape[1], 1, 1]
    elif mode == 'element':
        assert len(
            x.shape
        ) >= 1, "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'"
        alpha_shape = [1] + 
        list(x.shape)[1:]
    dtype = helper.input_dtype(input_param_name='x')
    # Learnable alpha parameter, initialized to 0.25.
    alpha = helper.create_parameter(
        attr=helper.param_attr,
        shape=alpha_shape,
        dtype=dtype,
        is_bias=False,
        default_initializer=Constant(0.25))
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="prelu",
        inputs={"X": x,
                'Alpha': alpha},
        attrs={"mode": mode,
               "data_format": data_format},
        outputs={"Out": out})
    return out


@templatedoc()
def brelu(x, t_min=0.0, t_max=24.0, name=None):
    """Append a ``brelu`` (bounded relu) op with attrs ``t_min``/``t_max``."""
    if in_dygraph_mode():
        return _C_ops.brelu(x, 't_min', t_min, 't_max', t_max)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'brelu')
    helper = LayerHelper('brelu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='brelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'t_min': t_min,
               't_max': t_max})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
    """Thin deprecated wrapper over paddle.nn.functional.leaky_relu."""
    return paddle.nn.functional.leaky_relu(x, alpha, name)


def soft_relu(x, threshold=40.0, name=None):
    """Append a ``soft_relu`` op with attribute ``threshold``."""
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'soft_relu')
    helper = LayerHelper('soft_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='soft_relu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold})
    return out


def flatten(x, axis=1, name=None):
    """Append a ``flatten2`` op collapsing dims before/after ``axis`` into a matrix."""
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
        'flatten')
    helper = LayerHelper('flatten', **locals())
    if not (isinstance(x, Variable)):
        raise ValueError("The input x should be a Variable")
    if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
        raise ValueError("The axis should be a int, and in range [0, rank(x)]")
    out = helper.create_variable_for_type_inference(x.dtype)
    # flatten2 also emits XShape, used by the backward pass to restore shape.
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='flatten2',
        inputs={"X": x},
        outputs={'Out': out,
                 'XShape': x_shape},
        attrs={"axis": axis})
    return out


def stack(x, axis=0, name=None):
    """Append a ``stack`` op joining a list of tensors along a new ``axis``.

    A single LoDTensorArray input is handled via ``tensor_array_to_tensor``
    with ``use_stack=True`` instead of the plain ``stack`` op.
    """
    axis = 0 if axis is None else axis
    if in_dygraph_mode():
        return _C_ops.stack(x, 'axis', axis)
    if not isinstance(x, list) and not isinstance(x, tuple):
        # Accept a bare LoDTensorArray by wrapping it in a one-element list.
        if isinstance(x, Variable) and x.desc.type(
        ) == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            x = [x]
        else:
            raise TypeError(
                "The type of '%s' in %s must be %s, but received %s" %
                ('x', 'stack',
                 'list[Tensor], tuple[Tensor] or TensorArray', type(x)))
    helper = LayerHelper('stack', **locals())
    out = helper.create_variable_for_type_inference(x[0].dtype)
    if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
            "number of the elements must be 1, but received %s." % len(x)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        for i in x:
            check_variable_and_dtype(i, 'x', \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'stack')
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': x[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': True})
    else:
        helper.append_op(
            type='stack',
            inputs={'X': x},
            outputs={'Y': out},
            attrs={'axis': axis})
    return out


@templatedoc(op_type="filter_by_instag")
def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0):
    """Append a ``filter_by_instag`` op; returns [filtered output, loss weight]."""
    helper = LayerHelper('filter_by_instag', **locals())
    out = helper.create_variable_for_type_inference(dtype=ins.dtype)
    loss_weight = helper.create_variable_for_type_inference(dtype=np.float64)
    mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype)
    helper.append_op(
        type='filter_by_instag',
        inputs={'Ins': ins,
                'Ins_tag': ins_tag,
                'Filter_tag': filter_tag},
        outputs={'Out': out,
                 'LossWeight': loss_weight,
                 'IndexMap': mmap},
        attrs={'is_lod': is_lod,
               'out_val_if_empty': out_val_if_empty})
    return [out, loss_weight]


def unstack(x, axis=0, num=None):
    """Split ``x`` into ``num`` tensors along ``axis`` (inverse of stack)."""
    if in_dygraph_mode():
        # NOTE(review): `num == None` should idiomatically be `num is None`.
        if num == None:
            num = x.shape[axis]
        if num == 0:
            return []
        return _C_ops.unstack(x, num, 'axis', int(axis), 'num', num)
    helper = LayerHelper('unstack', **locals())
    if num is None:
        # num can only be inferred from a known, positive static dim.
        if axis is None or x.shape[axis] <= 0:
            raise ValueError('unknown unstack number')
        else:
            num = x.shape[axis]
    outs = []
    for _ in range(num):
        outs.append(helper.create_variable_for_type_inference(x.dtype))
    helper.append_op(
        type='unstack',
        inputs={'X': [x]},
        outputs={'Y': outs},
        attrs={'axis': axis,
               'num': num})
    return outs


@deprecated(since='2.0.0', update_to="paddle.expand")
def expand(x, expand_times, name=None):
    """Append an ``expand`` op tiling each dim of ``x`` by ``expand_times``.

    ``expand_times`` may be a list/tuple (ints and/or Variables) or a whole
    Variable; Variable entries are marked -1 in the static attr.
    """
    if in_dygraph_mode():
        attrs = ()
        expand_times_tensor = None
        if isinstance(expand_times, (list, tuple)):
            expand_times = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in expand_times
            ]
            attrs += ('expand_times', expand_times)
        elif isinstance(expand_times, Variable):
            expand_times_tensor = expand_times
            expand_times_tensor.stop_gradient = True
        return _C_ops.expand(x, expand_times_tensor, *attrs)
    inputs = {"X": [x]}
    attrs = {}
    check_variable_and_dtype(
        x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
        'expand')
    check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
    # NOTE(review): "date type" in this message is a typo for "data type"
    # (kept verbatim here since the string is runtime behavior).
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
        raise ValueError(
            "expand op bool date type must set the stop_gradient to be False")
    helper = LayerHelper('expand', input=x, **locals())

    def get_attr_expand_times(list_expand_times):
        # Build the static attr list: -1 for Variable entries, the (positive)
        # int otherwise.
        attrs_expand_times = []
        for idx, times in enumerate(list_expand_times):
            if isinstance(times, Variable):
                attrs_expand_times.append(-1)
            else:
                attrs_expand_times.append(times)
                assert times > 0, (
                    "Each element given in expand_times must not be negative.")
        return attrs_expand_times

    if isinstance(expand_times, Variable):
        expand_times.stop_gradient = True
        inputs['ExpandTimes'] = expand_times
    elif isinstance(expand_times, (list, tuple)):
        attrs['expand_times'] = get_attr_expand_times(expand_times)
        if utils._contain_var(expand_times):
            inputs['expand_times_tensor'] = 
                utils._convert_to_tensor_list(
                    expand_times)
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


@deprecated(since='2.0.0', update_to="paddle.expand_as")
def expand_as(x, target_tensor, name=None):
    """Append an ``expand_as`` op broadcasting ``x`` to ``target_tensor``'s shape."""
    if in_dygraph_mode():
        return _C_ops.expand_as(x, target_tensor)
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
    check_variable_and_dtype(target_tensor, 'target_tensor',
                             ['float32', 'float64', 'int32', 'int64', 'bool'],
                             'expand_as')
    helper = LayerHelper('expand_as', input=x, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    inputs = {'X': x, 'target_tensor': target_tensor}
    helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
    return out


# Mid-file import kept as in the original source; the functions below need it.
from paddle.fluid.framework import convert_np_dtype_to_dtype_


@deprecated(since='1.8.0', update_to="paddle.uniform")
@templatedoc()
def uniform_random_batch_size_like(input,
                                   shape,
                                   dtype='float32',
                                   input_dim_idx=0,
                                   output_dim_idx=0,
                                   min=-1.0,
                                   max=1.0,
                                   seed=0):
    """Append a ``uniform_random_batch_size_like`` op.

    Samples U(min, max) into ``shape``, with output dim ``output_dim_idx``
    copied from ``input``'s dim ``input_dim_idx``.
    """
    check_variable_and_dtype(input, 'Input', ("float32", 'float64', "uint16"),
                             'uniform_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
    check_dtype(dtype, 'dtype', ('float32', 'float64', "uint16"),
                'uniform_random_batch_size_like')
    helper = LayerHelper('uniform_random_batch_size_like', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='uniform_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': out},
        attrs={
            'shape': shape,
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'min': min,
            'max': max,
            'seed': seed,
            'dtype': c_dtype
        })
    return out


@deprecated(since="2.0.0", update_to="paddle.normal")
@templatedoc()
def gaussian_random(shape,
                    mean=0.0,
                    std=1.0,
                    seed=0,
                    dtype='float32',
                    name=None):
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.gaussian_random('shape', shape, 'mean',
                                      float(mean), 'std',
                                      float(std), 'seed', seed, 'dtype', dtype)
    check_type(shape, 'shape', (list, tuple, Variable),
               'gaussian_random/randn')
    check_dtype(dtype, 'dtype', ['float32', 'float64'],
                'gaussian_random/randn')
    inputs = {}
    attrs = {
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': dtype,
        'use_mkldnn': False
    }
    # shape may mix ints and Variables; this helper routes each entry into
    # inputs or attrs accordingly.
    utils.get_shape_tensor_inputs(
        inputs=inputs,
        attrs=attrs,
        shape=shape,
        op_type='gaussian_random/randn')
    helper = LayerHelper('gaussian_random', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='gaussian_random',
        inputs=inputs,
        outputs={'Out': out},
        attrs=attrs)
    return out


@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
    """Append a ``sampling_id`` op sampling an index from each row of ``x``."""
    helper = LayerHelper('sampling_id', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='sampling_id',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'min': min,
               'max': max,
               'seed': seed})
    return out


@deprecated(since='1.8.0', update_to="paddle.normal")
@templatedoc()
def gaussian_random_batch_size_like(input,
                                    shape,
                                    input_dim_idx=0,
                                    output_dim_idx=0,
                                    mean=0.0,
                                    std=1.0,
                                    seed=0,
                                    dtype='float32'):
    """Append a ``gaussian_random_batch_size_like`` op (batch dim from ``input``)."""
    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
    check_type(input, 'input', (Variable),
               'fluid.layers.gaussian_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple),
               'fluid.layers.gaussian_random_batch_size_like')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
                'fluid.layers.gaussian_random_batch_size_like')
    out = helper.create_variable_for_type_inference(dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='gaussian_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': out},
        attrs={
            'shape': shape,
            'input_dim_idx':
                input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'mean': mean,
            'std': std,
            'seed': seed,
            'dtype': c_dtype
        })
    return out


@templatedoc()
def sum(x):
    """Sum a list of tensors element-wise; thin wrapper over paddle.add_n."""
    return paddle.add_n(x)


@templatedoc()
def slice(input, axes, starts, ends):
    """Append a ``slice`` op taking [starts, ends) along ``axes``.

    ``starts``/``ends`` may each be a list/tuple (ints and/or Variables) or a
    whole Variable; Variable entries become -1 attrs with infer_flags of -1.
    """
    if in_dygraph_mode():
        attrs = ()
        starts_tensor = None
        ends_tensor = None
        if isinstance(axes, (list, tuple)):
            axes = list(axes)
            if len(axes) == 0:
                raise ValueError(
                    "Input axes should not be an empty list/tuple.")
            # Clamp each axis into [0, rank-1], mapping negatives from the end.
            for i in range(len(axes)):
                if axes[i] < 0:
                    axes[i] = max(0, axes[i] + len(input.shape))
                else:
                    axes[i] = min(len(input.shape) - 1, axes[i])
        else:
            # NOTE(review): "reveived" is a typo for "received" (runtime
            # string, kept verbatim).
            raise ValueError(
                "Input axes must be a python list or tuple, but reveived {}".
                format(type(axes)))
        infer_flags = list(1 for i in range(len(axes)))
        if isinstance(starts, (list, tuple)):
            starts = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in starts
            ]
            attrs += ('starts', starts)
        elif isinstance(starts, Variable):
            starts_tensor = starts
            starts.stop_gradient = True
            infer_flags = list(-1 for i in range(len(axes)))
        if isinstance(ends, (list, tuple)):
            ends = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in ends
            ]
            attrs += ('ends', ends)
        elif isinstance(ends, Variable):
            ends_tensor = ends
            ends_tensor.stop_gradient = True
            infer_flags = list(-1 for i in range(len(axes)))
        return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
                            'infer_flags', infer_flags, *attrs)
    if not isinstance(starts, (list, tuple, Variable)):
        raise ValueError(
            "Input starts must be an Variable, python list or tuple.")
    if not isinstance(ends, (list, tuple, Variable)):
        raise ValueError(
            "Input ends must be an Variable, python list or tuple.")
    helper = LayerHelper('slice', **locals())
    inputs = {'Input': input}
    attrs = {'axes': axes}
    infer_flags = list(1 for i in range(len(axes)))
    if isinstance(starts, Variable):
        starts.stop_gradient = True
        inputs['StartsTensor'] = starts
        infer_flags = list(-1 for i in range(len(axes)))
    elif isinstance(starts, (list, tuple)):
        attrs['starts'] = []
        if utils._contain_var(starts):
            inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
            for i, dim in enumerate(starts):
                if isinstance(dim, Variable):
                    attrs['starts'].append(-1)
                    infer_flags[i] = -1
                else:
                    attrs['starts'].append(dim)
        else:
            attrs['starts'] = starts
    # ends: same three-way handling as starts above.
    if isinstance(ends, Variable):
        ends.stop_gradient = True
        inputs['EndsTensor'] = ends
        infer_flags = list(-1 for i in range(len(axes)))
    elif isinstance(ends, (list, tuple)):
        attrs['ends'] = []
        if utils._contain_var(ends):
            inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
            for i, dim in enumerate(ends):
                if isinstance(dim, Variable):
                    attrs['ends'].append(-1)
                    infer_flags[i] = -1
                else:
                    attrs['ends'].append(dim)
        else:
            attrs['ends'] = ends
    attrs['infer_flags'] = infer_flags
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('input'))
    helper.append_op(
        type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out})
    return out


@deprecated(since='2.0.0', update_to="paddle.strided_slice")
def strided_slice(input, axes, starts, ends, strides):
    """Append a ``strided_slice`` op (slice with per-axis ``strides``).

    ``starts``/``ends``/``strides`` may each be a list/tuple (ints and/or
    int32 Variables) or a whole int32 Variable.
    """
    helper = LayerHelper('strided_slice', **locals())
    check_variable_and_dtype(input, 'input',
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'strided_slice')
    check_type(axes, 'axes', (list, tuple), 'strided_slice')
    check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
    check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
    check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')

    def check_list_elements_dtype(list_input, input_name):
        # Every Variable entry (or the whole Variable) must be int32.
        if isinstance(list_input, Variable):
            check_dtype(list_input.dtype, input_name, ['int32'],
                        'strided_slice')
        else:
            for i, var in enumerate(list_input):
                var_name = input_name + '[' + str(i) + ']'
                if isinstance(var, Variable):
                    check_dtype(var.dtype, var_name, ['int32'],
                                'strided_slice')

    check_list_elements_dtype(axes, 'axes')
    check_list_elements_dtype(starts, 'starts')
    check_list_elements_dtype(ends, 'ends')
    check_list_elements_dtype(strides,
                              'strides')

    def get_new_list_tensor(old_list):
        # Materialize each int entry as a filled int32 tensor so the whole
        # list can be fed as a tensor-list input; Variables pass through.
        new_list_tensor = []
        for dim in old_list:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_list_tensor.append(dim)
            else:
                assert (isinstance(dim, int))
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_list_tensor.append(temp_out)
        return new_list_tensor

    inputs = {'Input': input}
    attrs = {'axes': axes}
    infer_flags = list(1 for i in range(len(axes)))
    if in_dygraph_mode():
        inputs = {'Input': input}
        attrs = {
            'axes': axes,
            'starts': starts,
            'ends': ends,
            'strides': strides,
            'infer_flags': infer_flags
        }
    else:
        # starts / ends / strides each get the same three-way routing:
        # whole Variable -> tensor input; mixed list -> tensor list + -1 attrs;
        # pure ints -> plain attrs.
        if isinstance(starts, Variable):
            starts.stop_gradient = True
            inputs['StartsTensor'] = starts
        elif isinstance(starts, (list, tuple)):
            attrs['starts'] = []
            if utils._contain_var(starts):
                inputs['StartsTensorList'] = get_new_list_tensor(starts)
                for i, dim in enumerate(starts):
                    if isinstance(dim, Variable):
                        attrs['starts'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['starts'].append(dim)
            else:
                attrs['starts'] = starts
        if isinstance(ends, Variable):
            ends.stop_gradient = True
            inputs['EndsTensor'] = ends
        elif isinstance(ends, (list, tuple)):
            attrs['ends'] = []
            if utils._contain_var(ends):
                inputs['EndsTensorList'] = get_new_list_tensor(ends)
                for i, dim in enumerate(ends):
                    if isinstance(dim, Variable):
                        attrs['ends'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['ends'].append(dim)
            else:
                attrs['ends'] = ends
        if isinstance(strides, Variable):
            strides.stop_gradient = True
            inputs['StridesTensor'] = strides
        elif isinstance(strides, (list, tuple)):
            attrs['strides'] = []
            if utils._contain_var(strides):
                inputs['StridesTensorList'] = get_new_list_tensor(strides)
                for i, dim in enumerate(strides):
                    if isinstance(dim, Variable):
                        attrs['strides'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['strides'].append(dim)
            else:
                attrs['strides'] = strides
        attrs['infer_flags'] = infer_flags
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('input'))
    helper.append_op(
        type='strided_slice', inputs=inputs, attrs=attrs,
        outputs={'Out': out})
    return out


def shape(input):
    """Append a ``shape`` op returning the int32 shape tensor of ``input``."""
    if in_dygraph_mode():
        out = _C_ops.shape(input)
        out.stop_gradient = True
        return out
    check_variable_and_dtype(input, 'input', [
        'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
        'complex128'
    ], 'shape')
    helper = LayerHelper('shape', **locals())
    out = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='shape',
        inputs={'Input': input},
        outputs={'Out': out},
        stop_gradient=True)
    return out


def rank(input):
    """Return the (static) rank of ``input`` as a 0-D int32 tensor via assign."""
    check_type(input, 'input', (Variable), 'input')
    ndims = len(input.shape)
    out = assign(np.array(ndims, 'int32'))
    return out


@deprecated(since="2.0.0", update_to="paddle.numel")
def size(input):
    """Append a ``size`` op returning the number of elements as int64."""
    if in_dygraph_mode():
        return _C_ops.size(input)
    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size")
    helper = LayerHelper('size', **locals())
    out = helper.create_variable_for_type_inference(dtype='int64')
    helper.append_op(
        type='size', inputs={'Input': input}, outputs={'Out': out})
    return out


def _elementwise_op(helper):
    """Append a generic binary elementwise op.

    The op type and its x/y/axis/use_mkldnn/name arguments are read from the
    LayerHelper's ``layer_type`` and ``kwargs`` (populated by the public
    elementwise_* wrappers below).
    """
    op_type = helper.layer_type
    x = helper.kwargs.get('x', None)
    y = helper.kwargs.get('y', None)
    assert x is not None, 'x cannot be None in {}'.format(op_type)
    assert y is not None, 'y cannot be None in {}'.format(op_type)
    check_variable_and_dtype(
        x, 'x', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
        op_type)
    check_variable_and_dtype(
        y, 'y', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
        op_type)
    axis = helper.kwargs.get('axis', -1)
    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
    name = helper.kwargs.get('name', None)
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type=op_type,
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'axis': axis,
               'use_mkldnn': use_mkldnn})
    return helper.append_activation(out)


def scale(x,
          scale=1.0,
          bias=0.0,
          bias_after_scale=True,
          act=None,
          name=None):
    if in_dygraph_mode():
        _scale = scale.numpy().item(0) if isinstance(scale,
                                                     Variable) else scale
        out = _C_ops.scale(x, 'scale',
                           float(_scale), 'bias',
                           float(bias), 'bias_after_scale', bias_after_scale)
        return dygraph_utils._append_activation_in_dygraph(out)
    check_variable_and_dtype(x, "x", [
        'float16', 'uint16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], "scale")
    inputs = {'X': [x]}
    attrs = {
        'bias': float(bias),
        'bias_after_scale': bias_after_scale,
    }
    # scale may be a Variable input or a float attr.
    if isinstance(scale, Variable):
        inputs['ScaleTensor'] = [scale]
    else:
        attrs['scale'] = float(scale)
    helper = LayerHelper('scale', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return helper.append_activation(out)


def elementwise_add(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_add`` op (MKL-DNN use follows the global flag)."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x,
            y,
            axis=axis,
            act=act,
            op_name='elementwise_add',
            use_mkldnn=_global_flags()["FLAGS_use_mkldnn"])
    return _elementwise_op(LayerHelper('elementwise_add', **locals()))


@deprecated(since="2.0.0", update_to="paddle.divide")
def elementwise_div(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_div`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_div')
    return _elementwise_op(LayerHelper('elementwise_div', **locals()))


def elementwise_sub(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_sub`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_sub')
    return _elementwise_op(LayerHelper('elementwise_sub', **locals()))


@deprecated(since="2.0.0", update_to="paddle.multiply")
def elementwise_mul(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_mul`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_mul')
    return _elementwise_op(LayerHelper('elementwise_mul', **locals()))


def elementwise_max(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_max`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_max')
    return _elementwise_op(LayerHelper('elementwise_max', **locals()))


def elementwise_min(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_min`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_min')
    return _elementwise_op(LayerHelper('elementwise_min', **locals()))


def elementwise_pow(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_pow`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_pow')
    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))


@deprecated(since="2.0.0", update_to="paddle.remainder")
def elementwise_mod(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_mod`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_mod')
    return _elementwise_op(LayerHelper('elementwise_mod', **locals()))


@deprecated(since="2.0.0", update_to="paddle.floor_divide")
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
    """Append an ``elementwise_floordiv`` op."""
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_floordiv')
    return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))


# Generate docstrings for the elementwise wrappers from their op protos, and
# hoist any deprecation "Warning: ... instead." line to the top of the doc.
for func in [
        elementwise_add,
        elementwise_div,
        elementwise_sub,
        elementwise_mul,
        elementwise_max,
        elementwise_pow,
        elementwise_min,
        elementwise_mod,
        elementwise_floordiv,
]:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "axis (int32, optional): If X.dimension != Y.dimension, \
            Y.dimension must be a subsequence of x.dimension. \
            And axis is the start dimension index for broadcasting Y onto X. ",
            "act (string, optional): Activation applied to the output. \
            Default is None. Details: :ref:`api_guide_activations_en` ",
            "name (string, optional): Name of the output. \
            Default is None. It's used to print debug info for developers. Details: \
            :ref:`api_guide_Name` "
        ],
        skip_attrs_set={
            "x_data_format", "y_data_format", "axis", "use_quantizer",
            "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
        }) + """\n""" + str(func.__doc__)
    doc_list = func.__doc__.splitlines()
    for idx, val in enumerate(doc_list):
        if val.startswith("Warning: ") and val.endswith(
                " instead."
        ) and "and will be removed in future versions." in val:
            doc_list.insert(0, doc_list.pop(idx))
            func.__doc__ = "\n" + "\n".join(i for i in doc_list)
            break

# NOTE(review): dead code — the list below is empty, so this loop never runs;
# kept verbatim (removing it would be a behavior-neutral cleanup for a later,
# separately reviewed change).
for func in []:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "act (basestring|None): Activation applied to the output.",
            "name (basestring|None): Name of the output."
        ])
    func.__doc__ = func.__doc__ + """

Examples:
  .. code-block:: python

    import paddle.fluid as fluid
    # example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5)
    x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32')
    y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32')
    z0 = fluid.layers.%s(x0, y0)

    # example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5)
    x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32')
    y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32')
    z1 = fluid.layers.%s(x1, y1)

    # example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32')
    y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32')
    z2 = fluid.layers.%s(x2, y2, axis=2)

    # example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32')
    y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32')
    z3 = fluid.layers.%s(x3, y3, axis=1)

    # example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32')
    y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32')
    z4 = fluid.layers.%s(x4, y4, axis=0)

    # example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
    x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32')
    y5 = fluid.layers.data(name="y5", shape=[2], dtype='float32')
    z5 = fluid.layers.%s(x5, y5, axis=0)
""" % (func.__name__, func.__name__, func.__name__, func.__name__,
       func.__name__, func.__name__)


def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    """Append a logical op (``op_name``); shared backend for logical_and/or/xor/not.

    For binary ops both operands must have the same dtype; ``y`` is ignored
    (and may be None) when ``binary_op`` is False.
    """
    if in_dygraph_mode():
        op = getattr(_C_ops, op_name)
        if binary_op:
            return op(x, y)
        else:
            return op(x)
    check_variable_and_dtype(x, "x", [
        "bool", "int8", "int16", "int32", "int64", "float32", "float64"
    ], op_name)
    if y is not None:
        check_variable_and_dtype(y, "y", [
            "bool", "int8", "int16", "int32", "int64", "float32", "float64"
        ], op_name)
    if out is not None:
        check_type(out, "out", Variable, op_name)
    helper = LayerHelper(op_name, **locals())
    if binary_op and x.dtype != y.dtype:
        raise ValueError(
            "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
% (op_name, x.dtype, y.dtype)) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) if binary_op: helper.append_op( type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}) else: helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out}) return out def logical_and(x, y, out=None, name=None): return _logical_op( op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True) def logical_or(x, y, out=None, name=None): return _logical_op( op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True) def logical_xor(x, y, out=None, name=None): return _logical_op( op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True) @templatedoc() def logical_not(x, out=None, name=None): return _logical_op( op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False) @templatedoc() def clip(x, min, max, name=None): helper = LayerHelper("clip", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip') if name is None: name = unique_name.generate_with_ignorable_key(".".join( [helper.name, 'tmp'])) out = helper.create_variable( type=x.type, name=name, dtype=x.dtype, persistable=False) helper.append_op( type="clip", inputs={"X": x}, attrs={"min": min, "max": max}, outputs={"Out": out}) return out @templatedoc() def clip_by_norm(x, max_norm, name=None): if in_dygraph_mode(): return _C_ops.clip_by_norm(x, 'max_norm', max_norm) helper = LayerHelper("clip_by_norm", **locals()) check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm') check_type(max_norm, 'max_norm', (float), 'clip_by_norm') if name is None: name = unique_name.generate_with_ignorable_key(".".join( [helper.name, 'tmp'])) out = helper.create_variable( type=x.type, name=name, dtype=x.dtype, persistable=False) helper.append_op( type="clip_by_norm", inputs={"X": x}, attrs={"max_norm": max_norm}, outputs={"Out": out}) return out @deprecated(since="2.0.0", update_to="paddle.mean") @templatedoc() def mean(x, 
name=None): if in_dygraph_mode(): return _C_ops.mean(x) helper = LayerHelper("mean", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out}) return out @templatedoc() def merge_selected_rows(x, name=None): helper = LayerHelper("merge_selected_rows", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="merge_selected_rows", inputs={"X": x}, attrs={}, outputs={"Out": out}) return out def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None): if in_dygraph_mode(): return _C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims, 'y_num_col_dims', y_num_col_dims) inputs = {"X": [x], "Y": [y]} attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims} helper = LayerHelper("mul", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul') check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="mul", inputs={"X": x, "Y": y}, attrs=attrs, outputs={"Out": out}) return out @deprecated(since="2.0.0", update_to="paddle.nn.functional.maxout") @templatedoc() def maxout(x, groups, name=None, axis=1): return paddle.nn.functional.maxout(**locals()) def space_to_depth(x, blocksize, name=None): helper = LayerHelper("space_to_depth", **locals()) if not (isinstance(blocksize, int)): raise ValueError("blocksize must be a python Int") check_variable_and_dtype(x, 'x', \ ['float16', 'float32', 'float64', 'int32', 'int64'], 'space_to_depth') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="space_to_depth", inputs={"X": x}, attrs={"blocksize": blocksize}, outputs={"Out": out}) return out def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None, act=None): helper = 
LayerHelper("affine_channel", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'affine_channel') check_type(scale, 'scale', (Variable, type(None)), 'affine_channel') check_type(bias, 'bias', (Variable, type(None)), 'affine_channel') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="affine_channel", inputs={"X": x, 'Scale': scale, 'Bias': bias}, attrs={"data_layout": data_layout}, outputs={"Out": out}) return helper.append_activation(out) def similarity_focus(input, axis, indexes, name=None): helper = LayerHelper('similarity_focus', **locals()) check_variable_and_dtype(input, 'input', ['float32', 'float64'], "similarity_focus") check_type(axis, 'axis', int, "similarity_focus") check_type(indexes, 'indexes', list, "similarity_focus") if axis != 1 and axis != 2 and axis != 3: raise ValueError("axis must be 1, 2 or 3.") if len(indexes) == 0: raise ValueError("indexes can not be empty.") out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='similarity_focus', inputs={'X': input}, outputs={'Out': out}, attrs={"axis": axis, "indexes": indexes}) return out def hash(input, hash_size, num_hash=1, name=None): check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash') check_type(hash_size, 'hash_size', int, 'hash') check_type(num_hash, 'num_hash', int, 'hash') helper = LayerHelper('hash', **locals()) out = helper.create_variable_for_type_inference( helper.input_dtype(), stop_gradient=True) helper.append_op( type='hash', inputs={'X': input}, outputs={'Out': out}, attrs={'num_hash': num_hash, 'mod_by': hash_size}) return out @templatedoc() def grid_sampler(x, grid, name=None): helper = LayerHelper("grid_sampler", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler') check_variable_and_dtype(grid, 'grid', ['float32', 'float64'], 'grid_sampler') if not isinstance(x, Variable): return ValueError("The x should be a Variable") if not isinstance(grid, 
Variable): return ValueError("The grid should be a Variable") out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x, 'Grid': grid} attrs = {'use_cudnn': False} if core.is_compiled_with_rocm() else {} helper.append_op( type='grid_sampler', inputs=ipts, outputs={'Output': out}, attrs=attrs) return out def log_loss(input, label, epsilon=1e-4, name=None): helper = LayerHelper('log_loss', **locals()) check_variable_and_dtype(input, 'input', ['float32'], 'log_loss') check_variable_and_dtype(label, 'label', ['float32'], 'log_loss') loss = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='log_loss', inputs={'Predicted': [input], 'Labels': [label]}, outputs={'Loss': [loss]}, attrs={'epsilon': epsilon}) return loss def add_position_encoding(input, alpha, beta, name=None): if in_dygraph_mode(): return _C_ops.add_position_encoding(input, "alpha", alpha, "beta", beta) helper = LayerHelper('add_position_encoding', **locals()) check_variable_and_dtype(input, 'input', ['float32', 'float64'], "add_position_encoding") dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type="add_position_encoding", inputs={"X": input}, outputs={"Out": out}, attrs={"alpha": alpha, "beta": beta}) return out def bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None): helper = LayerHelper('bilinear_tensor_product', **locals()) dtype = helper.input_dtype('x') param_shape = [size, x.shape[1], y.shape[1]] w = helper.create_parameter( attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False) out = helper.create_variable_for_type_inference(dtype=dtype) inputs = {"X": x, "Y": y, "Weight": w} if helper.bias_attr: bias_size = [1, size] bias = helper.create_parameter( attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) inputs["Bias"] = bias helper.append_op( type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out}) return 
helper.append_activation(out) @templatedoc() def get_tensor_from_selected_rows(x, name=None): check_type(x, 'x', Variable, 'get_tensor_from_selected_rows') if x.type != core.VarDesc.VarType.SELECTED_ROWS: raise TypeError( "The type of 'x' in get_tensor_from_selected_rows must be SELECTED_ROWS." ) helper = LayerHelper('get_tensor_from_selected_rows', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='get_tensor_from_selected_rows', inputs={'X': x}, outputs={'Out': out}, attrs={}) return out def shuffle_channel(x, group, name=None): helper = LayerHelper("shuffle_channel", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) if not isinstance(group, int): raise TypeError("group must be int type") helper.append_op( type="shuffle_channel", inputs={"X": x}, outputs={"Out": out}, attrs={"group": group}) return out @templatedoc() def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): if data_format not in ["NCHW", "NHWC"]: raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. 
" "Received Attr(data_format): {}.".format(data_format)) if in_dygraph_mode(): return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio', shift_ratio, 'data_format', data_format) helper = LayerHelper("temporal_shift", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift') check_type(seg_num, 'seg_num', int, 'temporal_shift') check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift') out = helper.create_variable_for_type_inference(dtype=x.dtype) if not isinstance(seg_num, int): raise TypeError("seg_num must be int type.") helper.append_op( type="temporal_shift", inputs={"X": x}, outputs={"Out": out}, attrs={ "seg_num": seg_num, "shift_ratio": shift_ratio, "data_format": data_format }) return out class PyFuncRegistry(object): _register_funcs = [] def __init__(self, func): if func is None or not callable(func): raise TypeError('func must be a Python function') self._func = func args = inspect.getargspec(self._func) if len(args[0]) == 0 and args[1] is None and args[2] is None: self._named_args = None else: self._named_args = args[0] self._id = core._append_python_callable_object_and_return_id(self) PyFuncRegistry._register_funcs.append(self) @classmethod def registered_func(cls, idx): return cls._register_funcs[idx]._func @classmethod def registered_func_num(cls): return len(cls._register_funcs) @property def id(self): return self._id def __call__(self, *args): if self._named_args is None: func_ret = self._func() else: kwargs = dict() idx = 0 for arg in self._named_args: kwargs[arg] = args[idx] idx += 1 func_ret = self._func(*args[idx:], **kwargs) if not isinstance(func_ret, (list, tuple)): func_ret = (func_ret, ) ret = [] for each_ret in func_ret: if each_ret is None or isinstance(each_ret, core.LoDTensor): ret.append(each_ret) continue if not isinstance(each_ret, np.ndarray): each_ret = np.array(each_ret) tensor = core.LoDTensor() tensor.set(each_ret, core.CPUPlace()) ret.append(tensor) return tuple(ret) @static_only 
@templatedoc() def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None): helper = LayerHelper('py_func', **locals()) check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func') if x is None: x = [] elif isinstance(x, Variable): x = [x] elif isinstance(x, tuple): x = list(x) elif not isinstance(x, (list, tuple, Variable)): raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)') check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func') if out is None: out_list = [] elif isinstance(out, Variable): out_list = [out] elif isinstance(out, tuple): out_list = list(out) elif isinstance(out, list): out_list = out else: raise TypeError( 'Output must be Variable/list(Variable)/tuple(Variable)') fwd_func_id = PyFuncRegistry(func).id bwd_func_id = PyFuncRegistry( backward_func).id if backward_func is not None else -1 for each_out in out_list: if len(each_out.shape) == 0: raise ValueError( 'Output shapes of py_func op should be provided by users manually' ) backward_skip_vars = set() if backward_func is not None and skip_vars_in_backward_input is not None: if isinstance(skip_vars_in_backward_input, Variable): skip_vars_in_backward_input = [skip_vars_in_backward_input] fwd_in_out = [v.name for v in x] fwd_in_out.extend([v.name for v in out_list]) fwd_in_out = set(fwd_in_out) backward_skip_vars = set() for v in skip_vars_in_backward_input: if not v.name in fwd_in_out: raise ValueError( 'Variable {} is not found in forward inputs and outputs' .format(v.name)) backward_skip_vars.add(v.name) helper.append_op( type='py_func', inputs={'X': x}, outputs={'Out': out_list}, attrs={ 'forward_callable_id': fwd_func_id, 'backward_callable_id': bwd_func_id, 'backward_skip_vars': list(backward_skip_vars) }) return out py_func.registered_func = PyFuncRegistry.registered_func py_func.registered_func_num = PyFuncRegistry.registered_func_num @templatedoc() def psroi_pool(input, rois, output_channels, spatial_scale, pooled_height, 
pooled_width, name=None): helper = LayerHelper('psroi_pool', **locals()) if not isinstance(output_channels, int): raise TypeError("output_channels must be int type") if not isinstance(spatial_scale, float): raise TypeError("spatial_scale must be float type") if not isinstance(pooled_height, int): raise TypeError("pooled_height must be int type") if not isinstance(pooled_width, int): raise TypeError("pooled_width must be int type") dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='psroi_pool', inputs={'X': input, 'ROIs': rois}, outputs={'Out': out}, attrs={ 'output_channels': output_channels, 'spatial_scale': spatial_scale, 'pooled_height': pooled_height, 'pooled_width': pooled_width }) return out @templatedoc() def prroi_pool(input, rois, spatial_scale=1.0, pooled_height=1, pooled_width=1, batch_roi_nums=None, name=None): check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool') check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool') helper = LayerHelper('prroi_pool', **locals()) if not isinstance(spatial_scale, float): raise TypeError("spatial_scale must be float type") if not isinstance(pooled_height, int): raise TypeError("pooled_height must be int type") if not isinstance(pooled_width, int): raise TypeError("pooled_width must be int type") dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) inputs_op = {'X': input, 'ROIs': rois} if batch_roi_nums is not None: inputs_op['BatchRoINums'] = batch_roi_nums helper.append_op( type='prroi_pool', inputs=inputs_op, outputs={'Out': out}, attrs={ 'spatial_scale': spatial_scale, 'pooled_height': pooled_height, 'pooled_width': pooled_width }) return out def pixel_shuffle(x, upscale_factor): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle') helper = LayerHelper("pixel_shuffle", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) if not isinstance(upscale_factor, int): 
raise TypeError("upscale factor must be int type") helper.append_op( type="pixel_shuffle", inputs={"X": x}, outputs={"Out": out}, attrs={"upscale_factor": upscale_factor}) return out def fsp_matrix(x, y): check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix') helper = LayerHelper('fsp_matrix', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype( input_param_name='x')) helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def continuous_value_model(input, cvm, use_cvm=True): helper = LayerHelper('cvm', **locals()) out = helper.create_variable(dtype=input.dtype) check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], 'cvm') helper.append_op( type='cvm', inputs={'X': [input], 'CVM': [cvm]}, outputs={'Y': [out]}, attrs={"use_cvm": use_cvm}) return out def where(condition): if in_dygraph_mode(): return _C_ops.where_index(condition) helper = LayerHelper("where_index", **locals()) out = helper.create_variable_for_type_inference( dtype=core.VarDesc.VarType.INT64) helper.append_op( type='where_index', inputs={'Condition': condition}, outputs={'Out': [out]}) return out @deprecated(since="2.0.0", update_to="paddle.sign") def sign(x): helper = LayerHelper("sign", **locals()) check_type(x, 'x', (Variable, np.ndarray), 'sign') if isinstance(x, np.ndarray): x = assign(x) check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]}) return out def unique(x, dtype='int32'): check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'], "unique") helper = LayerHelper("unique", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) index = helper.create_variable_for_type_inference(dtype) helper.append_op( type='unique', inputs={'X': x}, 
attrs={'dtype': convert_np_dtype_to_dtype_(dtype)}, outputs={'Out': [out], 'Index': [index]}) return out, index def unique_with_counts(x, dtype='int32'): check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'], "unique_with_counts") if not (dtype == 'int32' or dtype == 'int64'): raise TypeError( "Op unique_with_counts, index dtype must be int32 or int64") if x is None or len(x.shape) != 1: raise ValueError( "Op unique_with_counts, x must not be null and size of dim must be 1" ) helper = LayerHelper("unique_with_counts", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) index = helper.create_variable_for_type_inference(dtype) count = helper.create_variable_for_type_inference(dtype) helper.append_op( type='unique_with_counts', inputs={'X': x}, attrs={'dtype': convert_np_dtype_to_dtype_(dtype)}, outputs={'Out': [out], 'Index': [index], 'Count': [count]}) return out, index, count def deformable_conv(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None): check_variable_and_dtype(input, "input", ['float32', 'float64'], 'deformable_conv') check_variable_and_dtype(offset, "offset", ['float32', 'float64'], 'deformable_conv') check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv') num_channels = input.shape[1] assert param_attr is not False, "param_attr should not be False here." 
helper = LayerHelper('deformable_conv', **locals()) dtype = helper.input_dtype() if not isinstance(input, Variable): raise TypeError("Input of deformable_conv must be Variable") if not isinstance(offset, Variable): raise TypeError("Input Offset of deformable_conv must be Variable") if groups is None: num_filter_channels = num_channels else: if num_channels % groups != 0: raise ValueError("num_channels must be divisible by groups.") num_filter_channels = num_channels // groups filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') stride = utils.convert_to_list(stride, 2, 'stride') padding = utils.convert_to_list(padding, 2, 'padding') dilation = utils.convert_to_list(dilation, 2, 'dilation') input_shape = input.shape filter_shape = [num_filters, int(num_filter_channels)] + filter_size def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * num_channels if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " "filter size.".format(filter_elem_num)) std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, default_initializer=_get_default_param_initializer()) pre_bias = helper.create_variable_for_type_inference(dtype) if modulated: helper.append_op( type='deformable_conv', inputs={ 'Input': input, 'Filter': filter_param, 'Offset': offset, 'Mask': mask, }, outputs={"Output": pre_bias}, attrs={ 'strides': stride, 'paddings': padding, 'dilations': dilation, 'groups': groups, 'deformable_groups': deformable_groups, 'im2col_step': im2col_step, }) else: helper.append_op( type='deformable_conv_v1', inputs={ 'Input': input, 'Filter': filter_param, 'Offset': offset, }, outputs={"Output": pre_bias}, attrs={ 'strides': stride, 'paddings': padding, 'dilations': dilation, 'groups': groups, 'deformable_groups': deformable_groups, 
'im2col_step': im2col_step, }) output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) return output def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None): helper = LayerHelper("unfold", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold') assert len(x.shape) == 4, \ "input should be the format of [N, C, H, W]" if isinstance(kernel_sizes, int): kernel_sizes = [kernel_sizes, kernel_sizes] else: assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \ "kernel_sizes should either be an integer or a list of two integers" if isinstance(strides, int): strides = [strides, strides] else: assert isinstance(strides, list) and (len(strides) == 2), \ "strides should either be an integer or a list of two integers" if isinstance(dilations, int): dilations = [dilations, dilations] else: assert isinstance(dilations, list) and (len(dilations) == 2), \ "dilations should either be an integer or a list of two integers" if isinstance(paddings, int): paddings = [paddings] * 4 elif isinstance(paddings, list): if len(paddings) == 2: paddings = paddings * 2 elif len(paddings) == 4: pass else: raise ValueError( "paddings should either be an integer or a list of 2 or 4 integers" ) else: raise ValueError( "Unexpected type of paddings, it should be either an integer or a list" "of 2 or 4 integers") out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="unfold", inputs={"X": x}, outputs={"Y": out}, attrs={ "kernel_sizes": kernel_sizes, "strides": strides, "paddings": paddings, "dilations": dilations }) return out def deformable_roi_pooling(input, rois, trans, no_trans=False, spatial_scale=1.0, group_size=[1, 1], pooled_height=1, pooled_width=1, part_size=None, sample_per_part=1, trans_std=0.1, position_sensitive=False, name=None): check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'deformable_roi_pooling') check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 
'deformable_roi_pooling') check_variable_and_dtype(trans, 'trans', ['float32', 'float64'], 'deformable_roi_pooling') check_type(group_size, 'group_size', (list, tuple), 'deformable_roi_pooling') if part_size is not None: check_type(part_size, 'part_size', (list, tuple), 'deformable_roi_pooling') input_channels = input.shape[1] if position_sensitive == False: output_channels = input_channels else: output_channels = input_channels / pooled_height / pooled_width if part_size is None: part_height = pooled_height part_width = pooled_width part_size = [part_height, part_width] part_size = utils.convert_to_list(part_size, 2, 'part_size') group_size = utils.convert_to_list(group_size, 2, 'group_size') helper = LayerHelper('deformable_psroi_pooling', **locals()) dtype = helper.input_dtype() output = helper.create_variable_for_type_inference(dtype) top_count = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type="deformable_psroi_pooling", inputs={"Input": input, "ROIs": rois, "Trans": trans}, outputs={"Output": output, "TopCount": top_count}, attrs={ "no_trans": no_trans, "spatial_scale": spatial_scale, "output_dim": output_channels, "group_size": group_size, "pooled_height": pooled_height, "pooled_width": pooled_width, "part_size": part_size, "sample_per_part": sample_per_part, "trans_std": trans_std }) return output @deprecated(since="2.0.0", update_to="paddle.shard_index") def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index') op_type = 'shard_index' helper = LayerHelper(op_type, **locals()) if shard_id < 0 or shard_id >= nshards: raise ValueError('The shard_id(%d) should be in [0, %d)' % (shard_id, nshards)) out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type=op_type, inputs={'X': [input]}, outputs={'Out': out}, attrs={ 'index_num': index_num, 'nshards': nshards, 'shard_id': shard_id, 'ignore_value': ignore_value }, 
stop_gradient=True) return out @templatedoc() def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None): if in_dygraph_mode(): return _C_ops.hard_swish(x, 'threshold', threshold, 'scale', scale, 'offset', offset) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'hard_swish') helper = LayerHelper('hard_swish', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='hard_swish', inputs={'X': x}, outputs={'Out': out}, attrs={'threshold': threshold, 'scale': scale, 'offset': offset}) return out @templatedoc() def mish(x, threshold=20, name=None): if in_dygraph_mode(): return _C_ops.mish(x, 'threshold', threshold) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish') check_type(threshold, 'threshold', (float, int), 'mish') assert threshold > 0, "threshold of mish should be greater than 0, " \ "but got {}".format(threshold) helper = LayerHelper('mish', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mish', inputs={'X': x}, outputs={'Out': out}, attrs={'threshold': threshold}) return out def gather_tree(ids, parents): if in_dygraph_mode(): return _C_ops.gather_tree(ids, parents) else: helper = LayerHelper('gather_tree', **locals()) check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree') check_variable_and_dtype(parents, 'parents', ['int32', 'int64'], 'gather_tree') out = helper.create_variable_for_type_inference(dtype=ids.dtype) helper.append_op( type="gather_tree", inputs={"Ids": ids, "Parents": parents}, outputs={"Out": out}) return out @deprecated(since="2.0.0", update_to="paddle.uniform") @templatedoc() def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0, name=None): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) return _C_ops.uniform_random('shape', shape, 'min', float(min), 'max', float(max), 
'seed', seed, 'dtype', dtype) check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand') check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'), 'uniform_random/rand') inputs = dict() attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype} utils.get_shape_tensor_inputs( inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand') helper = LayerHelper("uniform_random", **locals()) out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out}) utils.try_set_static_shape_tensor(out, shape) return out def unbind(input, axis=0): helper = LayerHelper("unbind", **locals()) check_type(input, 'input', (Variable), 'unbind') dtype = helper.input_dtype() check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], 'unbind') if not isinstance(axis, (int)): raise TypeError("The type of 'axis' must be int, but received %s." % (type(axis))) if isinstance(axis, np.generic): axis = np.asscalar(axis) input_shape = input.shape axis_ = axis if axis >= 0 else len(input_shape) + axis num = input_shape[axis_] outs = [ helper.create_variable_for_type_inference(dtype=helper.input_dtype()) for i in range(num) ] helper.append_op( type="unbind", inputs={"X": input}, outputs={"Out": outs}, attrs={"axis": axis}) return outs
true
true
1c357d3712292b01ee95a5bca2342315acb4f8ef
623
py
Python
dojo/db_migrations/0147_rename_sslyze_parser.py
dant24/django-DefectDojo
caf5c91b3f8870d5f466dfaaf5a3a096f8812ad9
[ "BSD-3-Clause" ]
249
2016-09-06T21:04:40.000Z
2018-01-19T15:59:44.000Z
dojo/db_migrations/0147_rename_sslyze_parser.py
dant24/django-DefectDojo
caf5c91b3f8870d5f466dfaaf5a3a096f8812ad9
[ "BSD-3-Clause" ]
255
2016-09-06T21:36:37.000Z
2018-01-19T19:57:57.000Z
dojo/db_migrations/0147_rename_sslyze_parser.py
dant24/django-DefectDojo
caf5c91b3f8870d5f466dfaaf5a3a096f8812ad9
[ "BSD-3-Clause" ]
152
2016-09-06T21:04:54.000Z
2018-01-18T08:52:24.000Z
from django.db import migrations def rename_sslyze_parser(apps, schema_editor): Test_Type_model = apps.get_model('dojo', 'Test_Type') try: test_type_sslyze = Test_Type_model.objects.get(name='SSLyze 3 Scan (JSON)') test_type_sslyze.name = 'SSLyze Scan (JSON)' test_type_sslyze.save() except Test_Type_model.DoesNotExist: # This happens when a new instance of DD is initialized pass class Migration(migrations.Migration): dependencies = [ ('dojo', '0146_lead_optional'), ] operations = [ migrations.RunPython(rename_sslyze_parser), ]
25.958333
83
0.678973
from django.db import migrations def rename_sslyze_parser(apps, schema_editor): Test_Type_model = apps.get_model('dojo', 'Test_Type') try: test_type_sslyze = Test_Type_model.objects.get(name='SSLyze 3 Scan (JSON)') test_type_sslyze.name = 'SSLyze Scan (JSON)' test_type_sslyze.save() except Test_Type_model.DoesNotExist: pass class Migration(migrations.Migration): dependencies = [ ('dojo', '0146_lead_optional'), ] operations = [ migrations.RunPython(rename_sslyze_parser), ]
true
true
1c357d50e73856ed1bfaef590c993ed6007d7fe2
618
py
Python
opencv_detector.py
MingyaoLiu/Jetson-Nano-Projects
45f82ec7e5b99eab9e0bd6cae190cbf453cfe766
[ "MIT" ]
1
2019-04-20T21:55:05.000Z
2019-04-20T21:55:05.000Z
opencv_detector.py
MingyaoLiu/Jetson-Nano-Projects
45f82ec7e5b99eab9e0bd6cae190cbf453cfe766
[ "MIT" ]
3
2020-11-27T16:27:22.000Z
2021-04-06T17:53:28.000Z
opencv_detector.py
MingyaoLiu/Jetson-Nano-Projects
45f82ec7e5b99eab9e0bd6cae190cbf453cfe766
[ "MIT" ]
null
null
null
import numpy as np import cv2 as cv face_cascade = cv.CascadeClassifier('1.xml') eye_cascade = cv.CascadeClassifier('1.xml') img = cv.imread('1.jpeg') gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (x, y, w, h) in faces: cv.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in eyes: cv.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2) cv.imshow('img', img) cv.waitKey(0) cv.destroyAllWindows()
30.9
73
0.645631
import numpy as np import cv2 as cv face_cascade = cv.CascadeClassifier('1.xml') eye_cascade = cv.CascadeClassifier('1.xml') img = cv.imread('1.jpeg') gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (x, y, w, h) in faces: cv.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in eyes: cv.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2) cv.imshow('img', img) cv.waitKey(0) cv.destroyAllWindows()
true
true
1c357d575a08ef8dd6defe03e0126b0442f5c31b
3,410
py
Python
pyMSpec/test/test_mass_spectrum.py
SpatialMetabolomics/pyMS
52c4dce2c4c0eba3c6d447565f3296252f882f9e
[ "Apache-2.0" ]
5
2017-12-18T06:03:51.000Z
2019-04-05T21:12:54.000Z
pyMSpec/test/test_mass_spectrum.py
alexandrovteam/pyMS
52c4dce2c4c0eba3c6d447565f3296252f882f9e
[ "Apache-2.0" ]
null
null
null
pyMSpec/test/test_mass_spectrum.py
alexandrovteam/pyMS
52c4dce2c4c0eba3c6d447565f3296252f882f9e
[ "Apache-2.0" ]
3
2018-10-23T13:13:19.000Z
2021-02-22T09:19:26.000Z
""" Created on 27.08.2015 @author: Dominik Fay """ import unittest import numpy from numpy.ma.testutils import assert_array_equal from .. import mass_spectrum class MassSpectrumTest(unittest.TestCase): def test_init(self): """Check that spectrum is empty on initialization.""" ms = mass_spectrum.MassSpectrum() mzs, ints = ms.get_spectrum() assert_array_equal([], mzs) assert_array_equal([], ints) def test_copy_if_empty(self): """Check that get_spectrum creates a new numpy array if the spectrum has not been set.""" ms = mass_spectrum.MassSpectrum() mzs1, ints1 = ms.get_spectrum() mzs2, ints2 = ms.get_spectrum() self.assertFalse(mzs1 is mzs2) self.assertFalse(ints1 is ints2) def test_copy_if_list(self): """Check that get_spectrum creates a new numpy array if the spectrum has been set as list.""" ms = mass_spectrum.MassSpectrum() mzs_list = [1, 2, 3] ints_list = [2, 4, 6] ms.add_spectrum(mzs_list, ints_list) mzs_array, ints_array = ms.get_spectrum() self.assertFalse(mzs_list is mzs_array) self.assertFalse(ints_list is ints_array) def test_no_copy_if_array(self): """Check that get_spectrum does not create a new numpy array if the spectrum has been set as array.""" ms = mass_spectrum.MassSpectrum() mzs_array1 = numpy.array([1, 2, 3]) ints_array1 = numpy.array([2, 4, 6]) ms.add_spectrum(mzs_array1, ints_array1) mzs_array2, ints_array2 = ms.get_spectrum() self.assertTrue(mzs_array1 is mzs_array2) self.assertTrue(ints_array1 is ints_array2) def test_use_centroid_if_given(self): """Check that get_spectrum returns the centroids if they have been set.""" ms = mass_spectrum.MassSpectrum() mzs_profile1 = numpy.array([1, 2, 3]) ints_profile1 = numpy.array([2, 6, 4]) mzs_centroid1 = numpy.array([2.2]) ints_centroid1 = numpy.array([12]) ms.add_spectrum(mzs_profile1, ints_profile1) ms.add_centroids(mzs_centroid1, ints_centroid1) mzs_profile2, ints_profile2 = ms.get_spectrum(source='profile') mzs_centroid2, ints_centroid2 = ms.get_spectrum(source='centroids') 
self.assertTrue(mzs_profile1 is mzs_profile2) self.assertTrue(ints_profile1 is ints_profile2) self.assertTrue(mzs_centroid1 is mzs_centroid2) self.assertTrue(ints_centroid1 is ints_centroid2) def test_IOError_if_unknown_kwarg(self): """Check that get_spectrum raises an IOError when an unknown value of the kwarg 'source' is passed.""" ms = mass_spectrum.MassSpectrum() self.assertRaises(IOError, ms.get_spectrum, source='asdf') def test_IOError_if_different_arr_sizes(self): """Check that add_spectrum and add_centroids raise an IOError when the size of the m/z array and the intensity array differ. """ ms = mass_spectrum.MassSpectrum() test_cases = ( ([], [1]), ([-7.4], []), ([2968.2395, 29305.23, -23698.5, 1, 2 ** 28], range(2)), (range(198), range(102958))) for case in test_cases: self.assertRaises(IOError, ms.add_spectrum, *case) self.assertRaises(IOError, ms.add_centroids, *case) if __name__ == "__main__": unittest.main()
36.276596
110
0.66217
import unittest import numpy from numpy.ma.testutils import assert_array_equal from .. import mass_spectrum class MassSpectrumTest(unittest.TestCase): def test_init(self): ms = mass_spectrum.MassSpectrum() mzs, ints = ms.get_spectrum() assert_array_equal([], mzs) assert_array_equal([], ints) def test_copy_if_empty(self): ms = mass_spectrum.MassSpectrum() mzs1, ints1 = ms.get_spectrum() mzs2, ints2 = ms.get_spectrum() self.assertFalse(mzs1 is mzs2) self.assertFalse(ints1 is ints2) def test_copy_if_list(self): ms = mass_spectrum.MassSpectrum() mzs_list = [1, 2, 3] ints_list = [2, 4, 6] ms.add_spectrum(mzs_list, ints_list) mzs_array, ints_array = ms.get_spectrum() self.assertFalse(mzs_list is mzs_array) self.assertFalse(ints_list is ints_array) def test_no_copy_if_array(self): ms = mass_spectrum.MassSpectrum() mzs_array1 = numpy.array([1, 2, 3]) ints_array1 = numpy.array([2, 4, 6]) ms.add_spectrum(mzs_array1, ints_array1) mzs_array2, ints_array2 = ms.get_spectrum() self.assertTrue(mzs_array1 is mzs_array2) self.assertTrue(ints_array1 is ints_array2) def test_use_centroid_if_given(self): ms = mass_spectrum.MassSpectrum() mzs_profile1 = numpy.array([1, 2, 3]) ints_profile1 = numpy.array([2, 6, 4]) mzs_centroid1 = numpy.array([2.2]) ints_centroid1 = numpy.array([12]) ms.add_spectrum(mzs_profile1, ints_profile1) ms.add_centroids(mzs_centroid1, ints_centroid1) mzs_profile2, ints_profile2 = ms.get_spectrum(source='profile') mzs_centroid2, ints_centroid2 = ms.get_spectrum(source='centroids') self.assertTrue(mzs_profile1 is mzs_profile2) self.assertTrue(ints_profile1 is ints_profile2) self.assertTrue(mzs_centroid1 is mzs_centroid2) self.assertTrue(ints_centroid1 is ints_centroid2) def test_IOError_if_unknown_kwarg(self): ms = mass_spectrum.MassSpectrum() self.assertRaises(IOError, ms.get_spectrum, source='asdf') def test_IOError_if_different_arr_sizes(self): ms = mass_spectrum.MassSpectrum() test_cases = ( ([], [1]), ([-7.4], []), ([2968.2395, 29305.23, -23698.5, 1, 2 ** 28], 
range(2)), (range(198), range(102958))) for case in test_cases: self.assertRaises(IOError, ms.add_spectrum, *case) self.assertRaises(IOError, ms.add_centroids, *case) if __name__ == "__main__": unittest.main()
true
true
1c357dafad42d78510fe46f36e8b335ca5dd5326
4,748
py
Python
rose-blend/tests/test.py
exjam/rose-tools
bcc642b914696381082248b639f29299d17d8282
[ "MIT" ]
11
2018-02-09T04:25:07.000Z
2021-08-08T03:09:02.000Z
rose-blend/tests/test.py
wjsassi/rose-tools
31b5316e28c6fb0add6daa84d3d05eff516d6a8f
[ "MIT" ]
1
2021-04-21T03:09:43.000Z
2021-04-21T03:09:43.000Z
rose-blend/tests/test.py
wjsassi/rose-tools
31b5316e28c6fb0add6daa84d3d05eff516d6a8f
[ "MIT" ]
7
2018-07-08T20:35:15.000Z
2022-03-01T22:49:39.000Z
import os import sys import unittest DIR = os.path.abspath(os.path.dirname(__file__)) ROOT_DIR = os.path.dirname(DIR) DATA_DIR = os.path.join(DIR, "data") # Manually manipulate path so avoid `bpy` imports in `io_rose` module sys.path.append(os.path.join(ROOT_DIR, "io_rose")) from rose.him import * from rose.til import * from rose.zmd import * from rose.zms import * from rose.zon import * class RoseTests(unittest.TestCase): def test_him(self): him_file = os.path.join(DATA_DIR, "30_30.HIM") h = Him(him_file) self.assertEqual(h.width, 65) self.assertEqual(h.length, 65) self.assertEqual(h.grid_count, 4) self.assertEqual(h.patch_scale, 250.0) self.assertEqual(len(h.heights), 65) self.assertEqual(int(h.max_height), 8234) self.assertEqual(int(h.min_height), -500) self.assertEqual(len(h.patches), 16) for patch in h.patches: self.assertEqual(len(patch), 16) self.assertEqual(len(h.quad_patches), 85) def test_til(self): til_file = os.path.join(DATA_DIR, "30_30.TIL") t = Til(til_file) self.assertEqual(t.width, 16) self.assertEqual(t.length, 16) self.assertEqual(len(t.tiles), 16) for patch in t.tiles: self.assertEqual(len(patch), 16) def test_zmd(self): zmd_file = os.path.join(DATA_DIR, "MALE.ZMD") zmd = ZMD(zmd_file) self.assertEqual(len(zmd.bones), 21) def test_zms(self): zms7 = os.path.join(DATA_DIR, "FACE1_00100.ZMS") zms = ZMS(zms7) self.assertEqual(zms.identifier, "ZMS0007") self.assertEqual(zms.flags, 134) self.assertEqual(zms.positions_enabled(), True) self.assertEqual(zms.normals_enabled(), True) self.assertEqual(zms.bones_enabled(), False) self.assertEqual(zms.tangents_enabled(), False) self.assertEqual(zms.uv1_enabled(), True) self.assertEqual(zms.uv2_enabled(), False) self.assertEqual(zms.uv3_enabled(), False) self.assertEqual(zms.uv4_enabled(), False) self.assertEqual(len(zms.vertices), 183) self.assertEqual(len(zms.indices), 292) self.assertEqual(len(zms.bones), 0) self.assertEqual(len(zms.materials), 3) self.assertEqual(len(zms.strips), 0) 
self.assertEqual(zms.pool, 0) zms8 = os.path.join(DATA_DIR, "BODY1_00100.ZMS") zms = ZMS(zms8) self.assertEqual(zms.identifier, "ZMS0008") self.assertEqual(zms.flags, 182) self.assertEqual(zms.positions_enabled(), True) self.assertEqual(zms.normals_enabled(), True) self.assertEqual(zms.bones_enabled(), True) self.assertEqual(zms.tangents_enabled(), False) self.assertEqual(zms.uv1_enabled(), True) self.assertEqual(zms.uv2_enabled(), False) self.assertEqual(zms.uv3_enabled(), False) self.assertEqual(zms.uv4_enabled(), False) self.assertEqual(len(zms.vertices), 175) self.assertEqual(len(zms.indices), 258) self.assertEqual(len(zms.bones), 12) self.assertEqual(len(zms.materials), 0) self.assertEqual(len(zms.strips), 474) self.assertEqual(zms.pool, 0) def test_zon(self): zon_file = os.path.join(DATA_DIR, "JPT01.ZON") z = Zon(zon_file) self.assertEqual(z.zone_type, ZoneType.BoatVillage) self.assertEqual(z.width, 64) self.assertEqual(z.length, 64) self.assertEqual(z.grid_count, 4) self.assertEqual(z.grid_size, 250.0) self.assertEqual(len(z.positions), 64) for pos in z.positions: self.assertEqual(len(pos), 64) self.assertEqual(len(z.spawns), 6) self.assertEqual(len(z.textures), 49) self.assertEqual(len(z.tiles), 224) self.assertEqual(z.name, "0") self.assertEqual(z.is_underground, False) self.assertEqual(z.background_music_path, "button1") self.assertEqual(z.sky_path, "button2") self.assertEqual(z.economy_check_rate, 20) self.assertEqual(z.population_base, 6000) self.assertEqual(z.population_growth_rate, 50) self.assertEqual(z.metal_consumption, 15) self.assertEqual(z.stone_consumption, 15) self.assertEqual(z.wood_consumption, 5) self.assertEqual(z.leather_consumption, 10) self.assertEqual(z.cloth_consumption, 10) self.assertEqual(z.alchemy_consumption, 5) self.assertEqual(z.chemical_consumption, 5) self.assertEqual(z.industrial_consumption, 10) self.assertEqual(z.medicine_consumption, 5) self.assertEqual(z.food_consumption, 10)
35.17037
69
0.644693
import os import sys import unittest DIR = os.path.abspath(os.path.dirname(__file__)) ROOT_DIR = os.path.dirname(DIR) DATA_DIR = os.path.join(DIR, "data") sys.path.append(os.path.join(ROOT_DIR, "io_rose")) from rose.him import * from rose.til import * from rose.zmd import * from rose.zms import * from rose.zon import * class RoseTests(unittest.TestCase): def test_him(self): him_file = os.path.join(DATA_DIR, "30_30.HIM") h = Him(him_file) self.assertEqual(h.width, 65) self.assertEqual(h.length, 65) self.assertEqual(h.grid_count, 4) self.assertEqual(h.patch_scale, 250.0) self.assertEqual(len(h.heights), 65) self.assertEqual(int(h.max_height), 8234) self.assertEqual(int(h.min_height), -500) self.assertEqual(len(h.patches), 16) for patch in h.patches: self.assertEqual(len(patch), 16) self.assertEqual(len(h.quad_patches), 85) def test_til(self): til_file = os.path.join(DATA_DIR, "30_30.TIL") t = Til(til_file) self.assertEqual(t.width, 16) self.assertEqual(t.length, 16) self.assertEqual(len(t.tiles), 16) for patch in t.tiles: self.assertEqual(len(patch), 16) def test_zmd(self): zmd_file = os.path.join(DATA_DIR, "MALE.ZMD") zmd = ZMD(zmd_file) self.assertEqual(len(zmd.bones), 21) def test_zms(self): zms7 = os.path.join(DATA_DIR, "FACE1_00100.ZMS") zms = ZMS(zms7) self.assertEqual(zms.identifier, "ZMS0007") self.assertEqual(zms.flags, 134) self.assertEqual(zms.positions_enabled(), True) self.assertEqual(zms.normals_enabled(), True) self.assertEqual(zms.bones_enabled(), False) self.assertEqual(zms.tangents_enabled(), False) self.assertEqual(zms.uv1_enabled(), True) self.assertEqual(zms.uv2_enabled(), False) self.assertEqual(zms.uv3_enabled(), False) self.assertEqual(zms.uv4_enabled(), False) self.assertEqual(len(zms.vertices), 183) self.assertEqual(len(zms.indices), 292) self.assertEqual(len(zms.bones), 0) self.assertEqual(len(zms.materials), 3) self.assertEqual(len(zms.strips), 0) self.assertEqual(zms.pool, 0) zms8 = os.path.join(DATA_DIR, "BODY1_00100.ZMS") zms = 
ZMS(zms8) self.assertEqual(zms.identifier, "ZMS0008") self.assertEqual(zms.flags, 182) self.assertEqual(zms.positions_enabled(), True) self.assertEqual(zms.normals_enabled(), True) self.assertEqual(zms.bones_enabled(), True) self.assertEqual(zms.tangents_enabled(), False) self.assertEqual(zms.uv1_enabled(), True) self.assertEqual(zms.uv2_enabled(), False) self.assertEqual(zms.uv3_enabled(), False) self.assertEqual(zms.uv4_enabled(), False) self.assertEqual(len(zms.vertices), 175) self.assertEqual(len(zms.indices), 258) self.assertEqual(len(zms.bones), 12) self.assertEqual(len(zms.materials), 0) self.assertEqual(len(zms.strips), 474) self.assertEqual(zms.pool, 0) def test_zon(self): zon_file = os.path.join(DATA_DIR, "JPT01.ZON") z = Zon(zon_file) self.assertEqual(z.zone_type, ZoneType.BoatVillage) self.assertEqual(z.width, 64) self.assertEqual(z.length, 64) self.assertEqual(z.grid_count, 4) self.assertEqual(z.grid_size, 250.0) self.assertEqual(len(z.positions), 64) for pos in z.positions: self.assertEqual(len(pos), 64) self.assertEqual(len(z.spawns), 6) self.assertEqual(len(z.textures), 49) self.assertEqual(len(z.tiles), 224) self.assertEqual(z.name, "0") self.assertEqual(z.is_underground, False) self.assertEqual(z.background_music_path, "button1") self.assertEqual(z.sky_path, "button2") self.assertEqual(z.economy_check_rate, 20) self.assertEqual(z.population_base, 6000) self.assertEqual(z.population_growth_rate, 50) self.assertEqual(z.metal_consumption, 15) self.assertEqual(z.stone_consumption, 15) self.assertEqual(z.wood_consumption, 5) self.assertEqual(z.leather_consumption, 10) self.assertEqual(z.cloth_consumption, 10) self.assertEqual(z.alchemy_consumption, 5) self.assertEqual(z.chemical_consumption, 5) self.assertEqual(z.industrial_consumption, 10) self.assertEqual(z.medicine_consumption, 5) self.assertEqual(z.food_consumption, 10)
true
true
1c3580ad808a99cdfced82b0c66ab16a489c9b1f
1,548
py
Python
diff_movie.py
benmontet/K2-noise
a4b682cdf33f85d2dffc4cef115dcedacfccb4b4
[ "MIT" ]
1
2019-05-15T09:14:15.000Z
2019-05-15T09:14:15.000Z
diff_movie.py
benmontet/K2-noise
a4b682cdf33f85d2dffc4cef115dcedacfccb4b4
[ "MIT" ]
null
null
null
diff_movie.py
benmontet/K2-noise
a4b682cdf33f85d2dffc4cef115dcedacfccb4b4
[ "MIT" ]
null
null
null
#Create a movie of the frames import numpy as np from utils_simple import TIME, FLUX, QUALITY import matplotlib.pyplot as plt from matplotlib.cm import get_cmap #Limit ourselves 10-40 for more detail FLUX = FLUX[:, 15:35, 15:35] time = TIME - 1860 diffs = np.diff(FLUX, axis=0) base = "frames/{:0>3d}.png" bin = get_cmap("binary") div = get_cmap("bwr") def update_qual(qual): if qual == 0: qual_ant.set_text("") else: qual_ant.set_text("{:d}".format(qual)) qual_ant.set_color("red") fig, ax = plt.subplots(ncols=2, figsize=(8, 4)) img0 = ax[0].imshow(FLUX[1], cmap=bin, origin="upper", interpolation="none", extent=[15, 35, 35, 15]) ax[0].set_title("Frame") cb0 = plt.colorbar(img0, ax=ax[0]) img1 = ax[1].imshow(diffs[0], cmap=div, origin="upper", interpolation="none", extent=[15, 35, 35, 15]) scale = np.max(np.abs(diffs[0])) img1.set_clim(-scale, scale) ax[1].set_title("Difference") cb1 = plt.colorbar(img1, ax=ax[1]) ant = ax[0].annotate("{:.4f} [BJD - 2456684]".format(time[0]), (17,17), size="x-small") qual_ant = ax[0].annotate("", (31,17), size="x-small") update_qual(QUALITY[0]) fig.canvas.draw_idle() fig.savefig(base.format(0)) for i, (flux, diff, time, qual) in enumerate(zip(FLUX[1:], diffs, time[1:], QUALITY[1:])): img0.set_data(flux) img0.autoscale() ant.set_text("{:.4f} [BJD - 2456684]".format(time)) img1.set_data(diff) scale = np.max(np.abs(diff)) img1.set_clim(-scale, scale) update_qual(qual) fig.canvas.draw_idle() fig.savefig(base.format(i+1))
29.207547
102
0.656977
import numpy as np from utils_simple import TIME, FLUX, QUALITY import matplotlib.pyplot as plt from matplotlib.cm import get_cmap FLUX = FLUX[:, 15:35, 15:35] time = TIME - 1860 diffs = np.diff(FLUX, axis=0) base = "frames/{:0>3d}.png" bin = get_cmap("binary") div = get_cmap("bwr") def update_qual(qual): if qual == 0: qual_ant.set_text("") else: qual_ant.set_text("{:d}".format(qual)) qual_ant.set_color("red") fig, ax = plt.subplots(ncols=2, figsize=(8, 4)) img0 = ax[0].imshow(FLUX[1], cmap=bin, origin="upper", interpolation="none", extent=[15, 35, 35, 15]) ax[0].set_title("Frame") cb0 = plt.colorbar(img0, ax=ax[0]) img1 = ax[1].imshow(diffs[0], cmap=div, origin="upper", interpolation="none", extent=[15, 35, 35, 15]) scale = np.max(np.abs(diffs[0])) img1.set_clim(-scale, scale) ax[1].set_title("Difference") cb1 = plt.colorbar(img1, ax=ax[1]) ant = ax[0].annotate("{:.4f} [BJD - 2456684]".format(time[0]), (17,17), size="x-small") qual_ant = ax[0].annotate("", (31,17), size="x-small") update_qual(QUALITY[0]) fig.canvas.draw_idle() fig.savefig(base.format(0)) for i, (flux, diff, time, qual) in enumerate(zip(FLUX[1:], diffs, time[1:], QUALITY[1:])): img0.set_data(flux) img0.autoscale() ant.set_text("{:.4f} [BJD - 2456684]".format(time)) img1.set_data(diff) scale = np.max(np.abs(diff)) img1.set_clim(-scale, scale) update_qual(qual) fig.canvas.draw_idle() fig.savefig(base.format(i+1))
true
true
1c3580e2d29806b0375466fb2f8a062a91c33992
3,961
py
Python
dax/tests/unit_test_autoprocessor.py
DecisionNeurosciencePsychopathology/dax
4ae68d07128bc61058797f4f09a3a112f351ce21
[ "MIT" ]
28
2015-05-31T22:01:54.000Z
2022-01-14T12:18:48.000Z
dax/tests/unit_test_autoprocessor.py
DecisionNeurosciencePsychopathology/dax
4ae68d07128bc61058797f4f09a3a112f351ce21
[ "MIT" ]
218
2015-01-14T15:09:58.000Z
2022-03-30T17:03:46.000Z
dax/tests/unit_test_autoprocessor.py
DecisionNeurosciencePsychopathology/dax
4ae68d07128bc61058797f4f09a3a112f351ce21
[ "MIT" ]
26
2015-01-08T15:48:15.000Z
2022-01-31T14:58:34.000Z
from unittest import TestCase import io import yaml from dax.processors import AutoProcessor from dax.tests import unit_test_entity_common as common from dax import XnatUtils from dax import yaml_doc class ConnectionStringUnitTest(TestCase): def test_a_xpath(self): print((XnatUtils.InterfaceTemp.A_XPATH.format( project='proj1', subject='subj1', session='sess1', assessor='assr1'))) class AutoProcessorUnitTest(TestCase): @staticmethod def _make_yaml_source(resource): return yaml_doc.YamlDoc().from_string(resource) def _construct_session(self, name): tpo = common.TestProjectObject( common.xnat_contents[name]['projects'][0] ) return tpo.subjects()['subj1'].sessions()['sess1'] def test_scan_processor_construction(self): yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) yaml_source = self._make_yaml_source(common.git_pct_t1_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) def test_test(self): print("hello world") def test_get_assessor_input_types(self): yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) print((ap.get_assessor_input_types())) # def test_scan_assessor_get_assessor_name(self): # tseo = self._construct_session('brain_tiv_from_gif') # tsco = tseo.scan_by_key('1') # # yaml_source = self._make_yaml_source( # common.processor_yamls.scan_brain_tiv_from_gif_yaml) # ap = AutoProcessor(common.FakeXnat, yaml_source) # # actual = ap.get_assessor_name(tsco) # self.assertEquals(actual, # "proj1-x-subj1-x-sess1-x-1-x-BrainTivFromGIF_v1") # def test_scan_assessor_get_assessor(self): # tseo = self._construct_session('brain_tiv_from_gif') # tsco = tseo.scan_by_key('1') # # yaml_source = self._make_yaml_source( # common.processor_yamls.scan_brain_tiv_from_gif_yaml) # ap = AutoProcessor(common.FakeXnat, yaml_source) # # actual, name = ap.get_assessor(tsco) # self.assertEquals(name, # 
"proj1-x-subj1-x-sess1-x-1-x-BrainTivFromGIF_v1") def test_scan_assessor_should_run(self): tseo = self._construct_session('brain_tiv_from_gif') tsco = tseo.scan_by_key('1') yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) ret = ap.should_run(tseo.info()) self.assertEqual(ret, 1) # TODO: BenM/asr_of_asr/this method needs to run off pyxnat assessor # objects, so create a mocked pyxnat assessor for this (and other) tests def test_scan_assessor_has_inputs(self): tseo = self._construct_session('brain_tiv_from_gif') tsco = tseo.scan_by_key('1') yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) ret, comment = ap.has_inputs(tsco) self.assertEqual(ret, 1) def test_scan_assessor_build_cmds(self): tseo = self._construct_session('brain_tiv_from_gif') tsco = tseo.assessor_by_key('proc1') yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) tsao = tseo.assessors() print(tsao) # TODO:BenM/assessor_of_assessor/we are passing an interface object # rather than a cached object. Fix and then re-enable cmds = ap.get_cmds(tsco, '/testdir') print(("cmds =", cmds))
34.745614
77
0.677102
from unittest import TestCase import io import yaml from dax.processors import AutoProcessor from dax.tests import unit_test_entity_common as common from dax import XnatUtils from dax import yaml_doc class ConnectionStringUnitTest(TestCase): def test_a_xpath(self): print((XnatUtils.InterfaceTemp.A_XPATH.format( project='proj1', subject='subj1', session='sess1', assessor='assr1'))) class AutoProcessorUnitTest(TestCase): @staticmethod def _make_yaml_source(resource): return yaml_doc.YamlDoc().from_string(resource) def _construct_session(self, name): tpo = common.TestProjectObject( common.xnat_contents[name]['projects'][0] ) return tpo.subjects()['subj1'].sessions()['sess1'] def test_scan_processor_construction(self): yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) yaml_source = self._make_yaml_source(common.git_pct_t1_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) def test_test(self): print("hello world") def test_get_assessor_input_types(self): yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) print((ap.get_assessor_input_types())) def test_scan_assessor_should_run(self): tseo = self._construct_session('brain_tiv_from_gif') tsco = tseo.scan_by_key('1') yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) ret = ap.should_run(tseo.info()) self.assertEqual(ret, 1) def test_scan_assessor_has_inputs(self): tseo = self._construct_session('brain_tiv_from_gif') tsco = tseo.scan_by_key('1') yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) ret, comment = ap.has_inputs(tsco) self.assertEqual(ret, 1) def test_scan_assessor_build_cmds(self): tseo = self._construct_session('brain_tiv_from_gif') tsco = 
tseo.assessor_by_key('proc1') yaml_source = self._make_yaml_source( common.processor_yamls.scan_brain_tiv_from_gif_yaml) ap = AutoProcessor(common.FakeXnat, yaml_source) tsao = tseo.assessors() print(tsao) cmds = ap.get_cmds(tsco, '/testdir') print(("cmds =", cmds))
true
true
1c35830de691ae2c04afec42015310a01c8779c4
1,345
py
Python
remindForApproval.py
Kratosc3/GittigidiyorEntegre
17f011cfec49b3adb2b05f1b3ea559c7a7495c29
[ "MIT" ]
null
null
null
remindForApproval.py
Kratosc3/GittigidiyorEntegre
17f011cfec49b3adb2b05f1b3ea559c7a7495c29
[ "MIT" ]
null
null
null
remindForApproval.py
Kratosc3/GittigidiyorEntegre
17f011cfec49b3adb2b05f1b3ea559c7a7495c29
[ "MIT" ]
null
null
null
from zeep import Client from zeep.transports import Transport from zeep import xsd from zeep import helpers import xmltodict import json class remindForApproval: def __init__(self,apiKey = '',sign = '',time = '',saleCode = 21321,lang = '',session = None): # Zeep Client client = Client(wsdl="https://dev.gittigidiyor.com:8443/listingapi/ws/IndividualSaleService?wsdl", transport=Transport(session=session)) service = client.create_service('http://sale.individual.ws.listingapi.gg.com}IndividualSaleServiceBinding' , 'http://dev.gittigidiyor.com:8080/listingapi/ws/IndividualSaleService') with client.settings(raw_response=True): try: response = helpers.serialize_object(service.remindForApproval(apiKey,sign,time,saleCode,lang).content.decode('utf-8'),dict) #Parsing... jsondata = xmltodict.parse(response) jsondump = json.dumps(jsondata) jsonload = json.loads(jsondump) jsonList = jsonload['env:Envelope']['env:Body']['ns0:remindForApprovalResponse']['return'] self.asJson = jsonList except: self.asJson = None pass
36.351351
189
0.596283
from zeep import Client from zeep.transports import Transport from zeep import xsd from zeep import helpers import xmltodict import json class remindForApproval: def __init__(self,apiKey = '',sign = '',time = '',saleCode = 21321,lang = '',session = None): client = Client(wsdl="https://dev.gittigidiyor.com:8443/listingapi/ws/IndividualSaleService?wsdl", transport=Transport(session=session)) service = client.create_service('http://sale.individual.ws.listingapi.gg.com}IndividualSaleServiceBinding' , 'http://dev.gittigidiyor.com:8080/listingapi/ws/IndividualSaleService') with client.settings(raw_response=True): try: response = helpers.serialize_object(service.remindForApproval(apiKey,sign,time,saleCode,lang).content.decode('utf-8'),dict) jsondata = xmltodict.parse(response) jsondump = json.dumps(jsondata) jsonload = json.loads(jsondump) jsonList = jsonload['env:Envelope']['env:Body']['ns0:remindForApprovalResponse']['return'] self.asJson = jsonList except: self.asJson = None pass
true
true
1c358366a8e6e839414c44203ddea07b6333090d
931
py
Python
21.dirac-dice/py/part2.py
rolandbernard/adventofcode-2021
9249815af62d0fcf79b71357330a1456ea3be1ed
[ "BSD-2-Clause" ]
null
null
null
21.dirac-dice/py/part2.py
rolandbernard/adventofcode-2021
9249815af62d0fcf79b71357330a1456ea3be1ed
[ "BSD-2-Clause" ]
null
null
null
21.dirac-dice/py/part2.py
rolandbernard/adventofcode-2021
9249815af62d0fcf79b71357330a1456ea3be1ed
[ "BSD-2-Clause" ]
null
null
null
import sys from collections import defaultdict from functools import cache pos = tuple(int(l[-1]) for l in sys.stdin.read().strip().split('\n')) prob = defaultdict(lambda: 0) for i in range(1, 4): for j in range(1, 4): for k in range(1, 4): prob[i + j + k] += 1 def applyStep(pos, points, player, d): p = (pos[player] + d - 1) % 10 + 1 if player == 0: return (p, pos[1]), (points[0] + p, points[1]), 1 - player else: return (pos[0], p), (points[0], points[1] + p), 1 - player @cache def universes(pos, points = (0, 0), player = 0): if max(points) >= 21: return (1, 0) if points[0] > points[1] else (0, 1) else: res = [0, 0] for d, n in prob.items(): w0, w1 = universes(*applyStep(pos, points, player, d)) res[0] += n * w0 res[1] += n * w1 return tuple(res) print("Result:", max(universes(pos)))
25.861111
69
0.529538
import sys from collections import defaultdict from functools import cache pos = tuple(int(l[-1]) for l in sys.stdin.read().strip().split('\n')) prob = defaultdict(lambda: 0) for i in range(1, 4): for j in range(1, 4): for k in range(1, 4): prob[i + j + k] += 1 def applyStep(pos, points, player, d): p = (pos[player] + d - 1) % 10 + 1 if player == 0: return (p, pos[1]), (points[0] + p, points[1]), 1 - player else: return (pos[0], p), (points[0], points[1] + p), 1 - player @cache def universes(pos, points = (0, 0), player = 0): if max(points) >= 21: return (1, 0) if points[0] > points[1] else (0, 1) else: res = [0, 0] for d, n in prob.items(): w0, w1 = universes(*applyStep(pos, points, player, d)) res[0] += n * w0 res[1] += n * w1 return tuple(res) print("Result:", max(universes(pos)))
true
true
1c35838db2784a1cadbfa55a5a2c5676e49f705e
7,527
py
Python
autolab_core/dual_quaternion.py
shivamvats/autolab_core
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
[ "Apache-2.0" ]
68
2017-07-02T22:14:47.000Z
2022-03-30T19:09:37.000Z
autolab_core/dual_quaternion.py
shivamvats/autolab_core
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
[ "Apache-2.0" ]
14
2017-06-29T18:27:12.000Z
2022-02-02T20:59:02.000Z
autolab_core/dual_quaternion.py
shivamvats/autolab_core
cda081d2e07e3fe6cc9f3e8c86eea92330910d20
[ "Apache-2.0" ]
35
2017-07-17T01:44:59.000Z
2022-03-30T19:09:28.000Z
""" Class to handle dual quaternions and their interpolations Implementation details inspired by Ben Kenwright's "A Beginners Guide to Dual-Quaternions" http://cs.gmu.edu/~jmlien/teaching/cs451/uploads/Main/dual-quaternion.pdf Author: Jacky Liang """ from numbers import Number import numpy as np from .transformations import quaternion_multiply, quaternion_conjugate class DualQuaternion(object): """Class for handling dual quaternions and their interpolations. Attributes ---------- qr : :obj:`numpy.ndarray` of float A 4-entry quaternion in wxyz format. qd : :obj:`numpy.ndarray` of float A 4-entry quaternion in wxyz format. conjugate : :obj:`DualQuaternion` The conjugate of this DualQuaternion. norm : :obj:`tuple` of :obj:`numpy.ndarray` The normalized vectors for qr and qd, respectively. normalized : :obj:`DualQuaternion` This quaternion with qr normalized. """ def __init__( self, qr=[1, 0, 0, 0], qd=[0, 0, 0, 0], enforce_unit_norm=True ): """Initialize a dual quaternion. Parameters ---------- qr : :obj:`numpy.ndarray` of float A 4-entry quaternion in wxyz format. qd : :obj:`numpy.ndarray` of float A 4-entry quaternion in wxyz format. enforce_unit_norm : bool If true, raises a ValueError when the quaternion is not normalized. Raises ------ ValueError If enforce_unit_norm is True and the norm of qr is not 1. """ self.qr = qr self.qd = qd if enforce_unit_norm: norm = self.norm if not np.allclose(norm[0], [1]): raise ValueError( "Dual quaternion does not have norm 1! 
Got {0}".format( norm[0] ) ) @property def qr(self): """:obj:`numpy.ndarray` of float: A 4-entry quaternion in wxyz format.""" qr_wxyz = np.roll(self._qr, 1) return qr_wxyz @qr.setter def qr(self, qr_wxyz): qr_wxyz = np.array([n for n in qr_wxyz]) qr_xyzw = np.roll(qr_wxyz, -1) self._qr = qr_xyzw @property def qd(self): """:obj:`numpy.ndarray` of float: A 4-entry quaternion in wxyz format.""" qd_wxyz = np.roll(self._qd, 1) return qd_wxyz @qd.setter def qd(self, qd_wxyz): qd_wxyz = np.array([n for n in qd_wxyz]) if qd_wxyz[0] != 0: raise ValueError( "Invalid dual quaternion! First value of Qd must be 0. " f"Got {qd_wxyz[0]}" ) qd_xyzw = np.roll(qd_wxyz, -1) self._qd = qd_xyzw @property def conjugate(self): """:obj:`DualQuaternion`: The conjugate of this quaternion.""" qr_c_xyzw = quaternion_conjugate(self._qr) qd_c_xyzw = quaternion_conjugate(self._qd) qr_c_wxyz = np.roll(qr_c_xyzw, 1) qd_c_wxyz = np.roll(qd_c_xyzw, 1) return DualQuaternion(qr_c_wxyz, qd_c_wxyz) @property def norm(self): """:obj:`tuple` of :obj:`numpy.ndarray`: The normalized vectors for qr and qd, respectively.""" qr_c = quaternion_conjugate(self._qr) qd_c = quaternion_conjugate(self._qd) qr_norm = np.linalg.norm(quaternion_multiply(self._qr, qr_c)) qd_norm = np.linalg.norm( quaternion_multiply(self._qr, qd_c) + quaternion_multiply(self._qd, qr_c) ) return (qr_norm, qd_norm) @property def normalized(self): """:obj:`DualQuaternion`: This quaternion with qr normalized.""" qr = self.qr / 1.0 / np.linalg.norm(self.qr) return DualQuaternion(qr, self.qd, True) def copy(self): """Return a copy of this quaternion. Returns ------- :obj:`DualQuaternion` The copied DualQuaternion. """ return DualQuaternion(self.qr.copy(), self.qd.copy()) @staticmethod def interpolate(dq0, dq1, t): """Return the interpolation of two DualQuaternions. 
This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1]. """ if not 0 <= t <= 1: raise ValueError( "Interpolation step must be between 0 and 1! Got {0}".format(t) ) dqt = dq0 * (1 - t) + dq1 * t return dqt.normalized def __mul__(self, val): """Multiplies the dual quaternion by another dual quaternion or a scalar. Parameters ---------- val : :obj:`DualQuaternion` or number The value by which to multiply this dual quaternion. Returns ------- :obj:`DualQuaternion` A new DualQuaternion that results from the multiplication. Raises ------ ValueError If val is not a DualQuaternion or Number. """ if isinstance(val, DualQuaternion): new_qr_xyzw = quaternion_multiply(self._qr, val._qr) new_qd_xyzw = quaternion_multiply( self._qr, val._qd ) + quaternion_multiply(self._qd, val._qr) new_qr_wxyz = np.roll(new_qr_xyzw, 1) new_qd_wxyz = np.roll(new_qd_xyzw, 1) return DualQuaternion(new_qr_wxyz, new_qd_wxyz) elif isinstance(val, Number): new_qr_wxyz = val * self.qr new_qd_wxyz = val * self.qd return DualQuaternion(new_qr_wxyz, new_qd_wxyz, False) raise ValueError( "Cannot multiply dual quaternion with object of type {0}".format( type(val) ) ) def __add__(self, val): """Adds the dual quaternion to another dual quaternion. Parameters ---------- val : :obj:`DualQuaternion` The DualQuaternion to add to this one. Returns ------- :obj:`DualQuaternion` A new DualQuaternion that results from the addition.. 
Raises ------ ValueError If val is not a DualQuaternion. """ if not isinstance(val, DualQuaternion): raise ValueError( "Cannot add dual quaternion with object of type {0}".format( type(val) ) ) new_qr_wxyz = self.qr + val.qr new_qd_wxyz = self.qd + val.qd new_qr_wxyz = new_qr_wxyz / np.linalg.norm(new_qr_wxyz) return DualQuaternion(new_qr_wxyz, new_qd_wxyz, False) def __str__(self): return "{0}+{1}e".format(self.qr, self.qd) def __repr__(self): return "DualQuaternion({0},{1})".format(repr(self.qr), repr(self.qd))
29.402344
86
0.57048
from numbers import Number import numpy as np from .transformations import quaternion_multiply, quaternion_conjugate class DualQuaternion(object): def __init__( self, qr=[1, 0, 0, 0], qd=[0, 0, 0, 0], enforce_unit_norm=True ): self.qr = qr self.qd = qd if enforce_unit_norm: norm = self.norm if not np.allclose(norm[0], [1]): raise ValueError( "Dual quaternion does not have norm 1! Got {0}".format( norm[0] ) ) @property def qr(self): qr_wxyz = np.roll(self._qr, 1) return qr_wxyz @qr.setter def qr(self, qr_wxyz): qr_wxyz = np.array([n for n in qr_wxyz]) qr_xyzw = np.roll(qr_wxyz, -1) self._qr = qr_xyzw @property def qd(self): qd_wxyz = np.roll(self._qd, 1) return qd_wxyz @qd.setter def qd(self, qd_wxyz): qd_wxyz = np.array([n for n in qd_wxyz]) if qd_wxyz[0] != 0: raise ValueError( "Invalid dual quaternion! First value of Qd must be 0. " f"Got {qd_wxyz[0]}" ) qd_xyzw = np.roll(qd_wxyz, -1) self._qd = qd_xyzw @property def conjugate(self): qr_c_xyzw = quaternion_conjugate(self._qr) qd_c_xyzw = quaternion_conjugate(self._qd) qr_c_wxyz = np.roll(qr_c_xyzw, 1) qd_c_wxyz = np.roll(qd_c_xyzw, 1) return DualQuaternion(qr_c_wxyz, qd_c_wxyz) @property def norm(self): qr_c = quaternion_conjugate(self._qr) qd_c = quaternion_conjugate(self._qd) qr_norm = np.linalg.norm(quaternion_multiply(self._qr, qr_c)) qd_norm = np.linalg.norm( quaternion_multiply(self._qr, qd_c) + quaternion_multiply(self._qd, qr_c) ) return (qr_norm, qd_norm) @property def normalized(self): qr = self.qr / 1.0 / np.linalg.norm(self.qr) return DualQuaternion(qr, self.qd, True) def copy(self): return DualQuaternion(self.qr.copy(), self.qd.copy()) @staticmethod def interpolate(dq0, dq1, t): if not 0 <= t <= 1: raise ValueError( "Interpolation step must be between 0 and 1! 
Got {0}".format(t) ) dqt = dq0 * (1 - t) + dq1 * t return dqt.normalized def __mul__(self, val): if isinstance(val, DualQuaternion): new_qr_xyzw = quaternion_multiply(self._qr, val._qr) new_qd_xyzw = quaternion_multiply( self._qr, val._qd ) + quaternion_multiply(self._qd, val._qr) new_qr_wxyz = np.roll(new_qr_xyzw, 1) new_qd_wxyz = np.roll(new_qd_xyzw, 1) return DualQuaternion(new_qr_wxyz, new_qd_wxyz) elif isinstance(val, Number): new_qr_wxyz = val * self.qr new_qd_wxyz = val * self.qd return DualQuaternion(new_qr_wxyz, new_qd_wxyz, False) raise ValueError( "Cannot multiply dual quaternion with object of type {0}".format( type(val) ) ) def __add__(self, val): if not isinstance(val, DualQuaternion): raise ValueError( "Cannot add dual quaternion with object of type {0}".format( type(val) ) ) new_qr_wxyz = self.qr + val.qr new_qd_wxyz = self.qd + val.qd new_qr_wxyz = new_qr_wxyz / np.linalg.norm(new_qr_wxyz) return DualQuaternion(new_qr_wxyz, new_qd_wxyz, False) def __str__(self): return "{0}+{1}e".format(self.qr, self.qd) def __repr__(self): return "DualQuaternion({0},{1})".format(repr(self.qr), repr(self.qd))
true
true
1c3583babedf94c5ecb7f0ee8b82f2f755e2da54
9,608
py
Python
pyslam/feature_r2d2.py
dysdsyd/VO_benchmark
a7602edab934419c1ec73618ee655e18026f834f
[ "Apache-2.0" ]
2
2021-09-11T09:13:31.000Z
2021-11-03T01:39:56.000Z
pyslam/feature_r2d2.py
dysdsyd/VO_benchmark
a7602edab934419c1ec73618ee655e18026f834f
[ "Apache-2.0" ]
null
null
null
pyslam/feature_r2d2.py
dysdsyd/VO_benchmark
a7602edab934419c1ec73618ee655e18026f834f
[ "Apache-2.0" ]
null
null
null
""" * This file is part of PYSLAM. * Adapted from https://raw.githubusercontent.com/naver/r2d2/master/extract.py, see the licence therein. * * Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com> * * PYSLAM is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * PYSLAM is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with PYSLAM. If not, see <http://www.gnu.org/licenses/>. """ # adapted from from https://raw.githubusercontent.com/naver/r2d2/master/extract.py import config config.cfg.set_lib('r2d2') import os, pdb from PIL import Image import numpy as np import torch import cv2 from threading import RLock from r2d2.tools import common from r2d2.tools.dataloader import norm_RGB from r2d2.nets.patchnet import * import argparse from utils_sys import Printer kVerbose = False def load_network(model_fn): checkpoint = torch.load(model_fn) print("\n>> Creating net = " + checkpoint['net']) net = eval(checkpoint['net']) nb_of_weights = common.model_size(net) print(f" ( Model size: {nb_of_weights/1000:.0f}K parameters )") # initialization weights = checkpoint['state_dict'] net.load_state_dict({k.replace('module.',''):v for k,v in weights.items()}) return net.eval() class NonMaxSuppression (torch.nn.Module): def __init__(self, rel_thr=0.7, rep_thr=0.7): nn.Module.__init__(self) self.max_filter = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.rel_thr = rel_thr self.rep_thr = rep_thr def forward(self, reliability, repeatability, **kw): assert len(reliability) == len(repeatability) == 1 reliability, repeatability = 
reliability[0], repeatability[0] # local maxima maxima = (repeatability == self.max_filter(repeatability)) # remove low peaks maxima *= (repeatability >= self.rep_thr) maxima *= (reliability >= self.rel_thr) return maxima.nonzero().t()[2:4] def extract_multiscale( net, img, detector, scale_f=2**0.25, min_scale=0.0, max_scale=1, min_size=256, max_size=1024, verbose=False): old_bm = torch.backends.cudnn.benchmark torch.backends.cudnn.benchmark = False # speedup # extract keypoints at multiple scales B, three, H, W = img.shape assert B == 1 and three == 3, "should be a batch with a single RGB image" assert max_scale <= 1 s = 1.0 # current scale factor level = 0 L = [] X,Y,S,C,Q,D = [],[],[],[],[],[] while s+0.001 >= max(min_scale, min_size / max(H,W)): if s-0.001 <= min(max_scale, max_size / max(H,W)): nh, nw = img.shape[2:] if verbose: print(f"extracting at scale x{s:.02f} = {nw:4d}x{nh:3d} - level {level}") # extract descriptors with torch.no_grad(): res = net(imgs=[img]) # get output and reliability map descriptors = res['descriptors'][0] reliability = res['reliability'][0] repeatability = res['repeatability'][0] # normalize the reliability for nms # extract maxima and descs y,x = detector(**res) # nms c = reliability[0,0,y,x] q = repeatability[0,0,y,x] d = descriptors[0,:,y,x].t() n = d.shape[0] # accumulate multiple scales X.append(x.float() * W/nw) Y.append(y.float() * H/nh) S.append((32/s) * torch.ones(n, dtype=torch.float32, device=d.device)) C.append(c) Q.append(q) D.append(d) L_tmp =level * np.ones(n,dtype=np.int32) L = np.concatenate((L, L_tmp), axis=0).astype(np.int32) level += 1 s /= scale_f # down-scale the image for next iteration nh, nw = round(H*s), round(W*s) img = F.interpolate(img, (nh,nw), mode='bilinear', align_corners=False) # restore value torch.backends.cudnn.benchmark = old_bm Y = torch.cat(Y) X = torch.cat(X) S = torch.cat(S) # scale scores = torch.cat(C) * torch.cat(Q) # scores = reliability * repeatability XYS = torch.stack([X,Y,S], 
dim=-1) D = torch.cat(D) return XYS, D, scores, L # convert matrix of pts into list of keypoints def convert_pts_to_keypoints(pts, scores, sizes, levels): assert(len(pts)==len(scores)) kps = [] if pts is not None: # convert matrix [Nx2] of pts into list of keypoints kps = [ cv2.KeyPoint(p[0], p[1], _size=sizes[i], _response=scores[i], _octave=levels[i]) for i,p in enumerate(pts) ] return kps # TODO: fix the octave field of the output keypoints # interface for pySLAM class R2d2Feature2D: def __init__(self, num_features = 2000, scale_f = 2**0.25, min_size = 256, max_size = 1300, #1024, min_scale = 0, max_scale = 1, reliability_thr = 0.7, repeatability_thr = 0.7, do_cuda=True): print('Using R2d2Feature2D') self.lock = RLock() self.model_base_path = config.cfg.root_folder + '/thirdparty/r2d2' self.model_weights_path = self.model_base_path + '/models/r2d2_WASF_N16.pt' #print('model_weights_path:',self.model_weights_path) self.pts = [] self.kps = [] self.des = [] self.frame = None self.num_features = num_features self.scale_f = scale_f self.min_size = min_size self.max_size = max_size self.min_scale = min_scale self.max_scale = max_scale self.reliability_thr = reliability_thr self.repeatability_thr = repeatability_thr self.do_cuda = do_cuda if do_cuda: gpus = [0] else: gpus = -1 self.gpus = gpus self.do_cuda = common.torch_set_gpu(gpus) print('==> Loading pre-trained network.') self.net = load_network(self.model_weights_path) if self.do_cuda: self.net = self.net.cuda() # create the non-maxima detector self.detector = NonMaxSuppression(rel_thr=reliability_thr, rep_thr=repeatability_thr) print('==> Successfully loaded pre-trained network.') def compute_kps_des(self,img): with self.lock: H, W = img.shape[:2] img = norm_RGB(img)[None] if self.do_cuda: img = img.cuda() # extract keypoints/descriptors for a single image xys, desc, scores, levels = extract_multiscale(self.net, img, self.detector, scale_f = self.scale_f, min_scale = self.min_scale, max_scale = self.max_scale, 
min_size = self.min_size, max_size = self.max_size, verbose = kVerbose) xys = xys.cpu().numpy() desc = desc.cpu().numpy() scores = scores.cpu().numpy() idxs = scores.argsort()[-self.num_features or None:] selected_xys = xys[idxs] self.pts = selected_xys[:,:2] sizes = selected_xys[:,2] des = desc[idxs] scores = scores[idxs] levels = np.array(levels)[idxs] kps = convert_pts_to_keypoints(self.pts, scores, sizes, levels) return kps, des def detectAndCompute(self, frame, mask=None): #mask is a fake input with self.lock: self.frame = frame self.kps, self.des = self.compute_kps_des(frame) if kVerbose: print('detector: R2D2 , descriptor: R2D2 , #features: ', len(self.kps), ', frame res: ', frame.shape[0:2]) return self.kps, self.des # return keypoints if available otherwise call detectAndCompute() def detect(self, frame, mask=None): # mask is a fake input with self.lock: if self.frame is not frame: self.detectAndCompute(frame) return self.kps # return descriptors if available otherwise call detectAndCompute() def compute(self, frame, kps=None, mask=None): # kps is a fake input, mask is a fake input with self.lock: if self.frame is not frame: Printer.orange('WARNING: R2D2 is recomputing both kps and des on last input frame', frame.shape) self.detectAndCompute(frame) return self.kps, self.des
36.393939
146
0.560262
import config config.cfg.set_lib('r2d2') import os, pdb from PIL import Image import numpy as np import torch import cv2 from threading import RLock from r2d2.tools import common from r2d2.tools.dataloader import norm_RGB from r2d2.nets.patchnet import * import argparse from utils_sys import Printer kVerbose = False def load_network(model_fn): checkpoint = torch.load(model_fn) print("\n>> Creating net = " + checkpoint['net']) net = eval(checkpoint['net']) nb_of_weights = common.model_size(net) print(f" ( Model size: {nb_of_weights/1000:.0f}K parameters )") weights = checkpoint['state_dict'] net.load_state_dict({k.replace('module.',''):v for k,v in weights.items()}) return net.eval() class NonMaxSuppression (torch.nn.Module): def __init__(self, rel_thr=0.7, rep_thr=0.7): nn.Module.__init__(self) self.max_filter = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.rel_thr = rel_thr self.rep_thr = rep_thr def forward(self, reliability, repeatability, **kw): assert len(reliability) == len(repeatability) == 1 reliability, repeatability = reliability[0], repeatability[0] maxima = (repeatability == self.max_filter(repeatability)) maxima *= (repeatability >= self.rep_thr) maxima *= (reliability >= self.rel_thr) return maxima.nonzero().t()[2:4] def extract_multiscale( net, img, detector, scale_f=2**0.25, min_scale=0.0, max_scale=1, min_size=256, max_size=1024, verbose=False): old_bm = torch.backends.cudnn.benchmark torch.backends.cudnn.benchmark = False B, three, H, W = img.shape assert B == 1 and three == 3, "should be a batch with a single RGB image" assert max_scale <= 1 s = 1.0 level = 0 L = [] X,Y,S,C,Q,D = [],[],[],[],[],[] while s+0.001 >= max(min_scale, min_size / max(H,W)): if s-0.001 <= min(max_scale, max_size / max(H,W)): nh, nw = img.shape[2:] if verbose: print(f"extracting at scale x{s:.02f} = {nw:4d}x{nh:3d} - level {level}") with torch.no_grad(): res = net(imgs=[img]) descriptors = res['descriptors'][0] reliability = res['reliability'][0] 
repeatability = res['repeatability'][0] y,x = detector(**res) c = reliability[0,0,y,x] q = repeatability[0,0,y,x] d = descriptors[0,:,y,x].t() n = d.shape[0] X.append(x.float() * W/nw) Y.append(y.float() * H/nh) S.append((32/s) * torch.ones(n, dtype=torch.float32, device=d.device)) C.append(c) Q.append(q) D.append(d) L_tmp =level * np.ones(n,dtype=np.int32) L = np.concatenate((L, L_tmp), axis=0).astype(np.int32) level += 1 s /= scale_f nh, nw = round(H*s), round(W*s) img = F.interpolate(img, (nh,nw), mode='bilinear', align_corners=False) torch.backends.cudnn.benchmark = old_bm Y = torch.cat(Y) X = torch.cat(X) S = torch.cat(S) scores = torch.cat(C) * torch.cat(Q) XYS = torch.stack([X,Y,S], dim=-1) D = torch.cat(D) return XYS, D, scores, L def convert_pts_to_keypoints(pts, scores, sizes, levels): assert(len(pts)==len(scores)) kps = [] if pts is not None: kps = [ cv2.KeyPoint(p[0], p[1], _size=sizes[i], _response=scores[i], _octave=levels[i]) for i,p in enumerate(pts) ] return kps class R2d2Feature2D: def __init__(self, num_features = 2000, scale_f = 2**0.25, min_size = 256, max_size = 1300, min_scale = 0, max_scale = 1, reliability_thr = 0.7, repeatability_thr = 0.7, do_cuda=True): print('Using R2d2Feature2D') self.lock = RLock() self.model_base_path = config.cfg.root_folder + '/thirdparty/r2d2' self.model_weights_path = self.model_base_path + '/models/r2d2_WASF_N16.pt' self.pts = [] self.kps = [] self.des = [] self.frame = None self.num_features = num_features self.scale_f = scale_f self.min_size = min_size self.max_size = max_size self.min_scale = min_scale self.max_scale = max_scale self.reliability_thr = reliability_thr self.repeatability_thr = repeatability_thr self.do_cuda = do_cuda if do_cuda: gpus = [0] else: gpus = -1 self.gpus = gpus self.do_cuda = common.torch_set_gpu(gpus) print('==> Loading pre-trained network.') self.net = load_network(self.model_weights_path) if self.do_cuda: self.net = self.net.cuda() self.detector = 
NonMaxSuppression(rel_thr=reliability_thr, rep_thr=repeatability_thr) print('==> Successfully loaded pre-trained network.') def compute_kps_des(self,img): with self.lock: H, W = img.shape[:2] img = norm_RGB(img)[None] if self.do_cuda: img = img.cuda() xys, desc, scores, levels = extract_multiscale(self.net, img, self.detector, scale_f = self.scale_f, min_scale = self.min_scale, max_scale = self.max_scale, min_size = self.min_size, max_size = self.max_size, verbose = kVerbose) xys = xys.cpu().numpy() desc = desc.cpu().numpy() scores = scores.cpu().numpy() idxs = scores.argsort()[-self.num_features or None:] selected_xys = xys[idxs] self.pts = selected_xys[:,:2] sizes = selected_xys[:,2] des = desc[idxs] scores = scores[idxs] levels = np.array(levels)[idxs] kps = convert_pts_to_keypoints(self.pts, scores, sizes, levels) return kps, des def detectAndCompute(self, frame, mask=None): with self.lock: self.frame = frame self.kps, self.des = self.compute_kps_des(frame) if kVerbose: print('detector: R2D2 , descriptor: R2D2 , #features: ', len(self.kps), ', frame res: ', frame.shape[0:2]) return self.kps, self.des def detect(self, frame, mask=None): with self.lock: if self.frame is not frame: self.detectAndCompute(frame) return self.kps def compute(self, frame, kps=None, mask=None): with self.lock: if self.frame is not frame: Printer.orange('WARNING: R2D2 is recomputing both kps and des on last input frame', frame.shape) self.detectAndCompute(frame) return self.kps, self.des
true
true
1c3584d9e2de243024676c411867a89907337846
642
py
Python
tmc/ui/prompt.py
jgke/tmc.py
a061d199ecce0274c1fa554fb065e13647d9862b
[ "MIT" ]
null
null
null
tmc/ui/prompt.py
jgke/tmc.py
a061d199ecce0274c1fa554fb065e13647d9862b
[ "MIT" ]
null
null
null
tmc/ui/prompt.py
jgke/tmc.py
a061d199ecce0274c1fa554fb065e13647d9862b
[ "MIT" ]
null
null
null
def yn_prompt(msg, default=True): """ Prompts the user for yes or no. """ ret = custom_prompt(msg, ["y", "n"], "y" if default else "n") if ret == "y": return True return False def custom_prompt(msg, options, default): """ Prompts the user with custom options. """ formatted_options = [ x.upper() if x == default else x.lower() for x in options ] sure = input("{0} [{1}]: ".format(msg, "/".join(formatted_options))) if len(sure) == 0: return default for option in options: if sure.upper() == option.upper(): return option return default
25.68
72
0.563863
def yn_prompt(msg, default=True): ret = custom_prompt(msg, ["y", "n"], "y" if default else "n") if ret == "y": return True return False def custom_prompt(msg, options, default): formatted_options = [ x.upper() if x == default else x.lower() for x in options ] sure = input("{0} [{1}]: ".format(msg, "/".join(formatted_options))) if len(sure) == 0: return default for option in options: if sure.upper() == option.upper(): return option return default
true
true
1c3585df2004cca2bbaff017c9833ed1e71eb017
4,903
py
Python
tensorflow/compiler/tests/extract_image_patches_op_test.py
uve/tensorflow
e08079463bf43e5963acc41da1f57e95603f8080
[ "Apache-2.0" ]
null
null
null
tensorflow/compiler/tests/extract_image_patches_op_test.py
uve/tensorflow
e08079463bf43e5963acc41da1f57e95603f8080
[ "Apache-2.0" ]
null
null
null
tensorflow/compiler/tests/extract_image_patches_op_test.py
uve/tensorflow
e08079463bf43e5963acc41da1f57e95603f8080
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for ExtractImagePatches op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests import xla_test from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class ExtractImagePatches(xla_test.XLATestCase): """Functional tests for ExtractImagePatches op.""" def _VerifyValues(self, image, ksizes, strides, rates, padding, patches): """Tests input-output pairs for the ExtractImagePatches op. Args: image: Input tensor with shape: [batch, in_rows, in_cols, depth]. ksizes: Patch size specified as: [ksize_rows, ksize_cols]. strides: Output strides, specified as [stride_rows, stride_cols]. rates: Atrous rates, specified as [rate_rows, rate_cols]. padding: Padding type. patches: Expected output. 
""" ksizes = [1] + ksizes + [1] strides = [1] + strides + [1] rates = [1] + rates + [1] with self.session(): image_placeholder = array_ops.placeholder(dtypes.float32) with self.test_scope(): out_tensor = array_ops.extract_image_patches( image_placeholder, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name="im2col") feed_dict = {image_placeholder: image} self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict)) def testKsize1x1Stride1x1Rate1x1(self): """Verifies that for 1x1 kernel the output equals the input.""" # [2, 3, 4, 5] image = np.reshape(range(120), [2, 3, 4, 5]) # [2, 3, 4, 5] patches = np.reshape(range(120), [2, 3, 4, 5]) for padding in ["VALID", "SAME"]: self._VerifyValues( image, ksizes=[1, 1], strides=[1, 1], rates=[1, 1], padding=padding, patches=patches) def testKsize1x1Stride2x3Rate1x1(self): """Test for 1x1 kernel and strides.""" # [2, 4, 5, 3] image = np.reshape(range(120), [2, 4, 5, 3]) # [2, 2, 2, 3] patches = image[:, ::2, ::3, :] for padding in ["VALID", "SAME"]: self._VerifyValues( image, ksizes=[1, 1], strides=[2, 3], rates=[1, 1], padding=padding, patches=patches) def testKsize2x2Stride1x1Rate1x1Valid(self): """Test for 2x2 kernel with VALID padding.""" # [1, 2, 2, 1] image = [[[[1], [2]], [[3], [4]]]] # [1, 1, 1, 4] patches = [[[[1, 2, 3, 4]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[1, 1], padding="VALID", patches=patches) def testKsize2x2Stride1x1Rate1x1Same(self): """Test for 2x2 kernel with SAME padding.""" # [1, 2, 2, 1] image = [[[[1], [2]], [[3], [4]]]] # [1, 2, 2, 4] patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[1, 1], padding="SAME", patches=patches) def testKsize2x2Stride1x1Rate2x2Valid(self): """Test for 2x2 kernel with 2x2 dilation.""" # [1, 2, 2, 1] image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32) # [1, 2, 2, 4] patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]], [[4, 6, 12, 14], [5, 
7, 13, 15]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[2, 2], padding="VALID", patches=patches) def testKsize2x2Stride1x1Rate1x1ValidDepth2(self): """Test for 2x2 kernel with VALID padding.""" # [1, 2, 2, 2] image = [[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]] # [1, 1, 1, 8] patches = [[[[1, 5, 2, 6, 3, 7, 4, 8]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[1, 1], padding="VALID", patches=patches) if __name__ == "__main__": test.main()
32.686667
81
0.562309
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests import xla_test from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class ExtractImagePatches(xla_test.XLATestCase): def _VerifyValues(self, image, ksizes, strides, rates, padding, patches): ksizes = [1] + ksizes + [1] strides = [1] + strides + [1] rates = [1] + rates + [1] with self.session(): image_placeholder = array_ops.placeholder(dtypes.float32) with self.test_scope(): out_tensor = array_ops.extract_image_patches( image_placeholder, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name="im2col") feed_dict = {image_placeholder: image} self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict)) def testKsize1x1Stride1x1Rate1x1(self): image = np.reshape(range(120), [2, 3, 4, 5]) patches = np.reshape(range(120), [2, 3, 4, 5]) for padding in ["VALID", "SAME"]: self._VerifyValues( image, ksizes=[1, 1], strides=[1, 1], rates=[1, 1], padding=padding, patches=patches) def testKsize1x1Stride2x3Rate1x1(self): image = np.reshape(range(120), [2, 4, 5, 3]) patches = image[:, ::2, ::3, :] for padding in ["VALID", "SAME"]: self._VerifyValues( image, ksizes=[1, 1], strides=[2, 3], rates=[1, 1], padding=padding, patches=patches) def testKsize2x2Stride1x1Rate1x1Valid(self): image = [[[[1], [2]], [[3], [4]]]] patches = [[[[1, 2, 3, 4]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[1, 1], padding="VALID", patches=patches) def testKsize2x2Stride1x1Rate1x1Same(self): image = [[[[1], [2]], [[3], [4]]]] patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[1, 1], padding="SAME", patches=patches) def testKsize2x2Stride1x1Rate2x2Valid(self): image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32) patches = [[[[0, 2, 8, 
10], [1, 3, 9, 11]], [[4, 6, 12, 14], [5, 7, 13, 15]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[2, 2], padding="VALID", patches=patches) def testKsize2x2Stride1x1Rate1x1ValidDepth2(self): image = [[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]] patches = [[[[1, 5, 2, 6, 3, 7, 4, 8]]]] self._VerifyValues( image, ksizes=[2, 2], strides=[1, 1], rates=[1, 1], padding="VALID", patches=patches) if __name__ == "__main__": test.main()
true
true
1c35865c5c84b4c27bfc95f86f6862f0433e9a24
6,387
py
Python
ludwig/utils/loss_utils.py
ludwig-ai/ludw
b9d95bbdb474bc22260269de1bc094bc5455f37c
[ "Apache-2.0" ]
970
2020-12-17T15:09:20.000Z
2022-03-31T22:58:03.000Z
ludwig/utils/loss_utils.py
ludwig-ai/ludw
b9d95bbdb474bc22260269de1bc094bc5455f37c
[ "Apache-2.0" ]
503
2020-12-16T21:44:40.000Z
2022-03-31T18:21:52.000Z
ludwig/utils/loss_utils.py
ludwig-ai/ludw
b9d95bbdb474bc22260269de1bc094bc5455f37c
[ "Apache-2.0" ]
145
2020-12-18T07:38:30.000Z
2022-03-29T19:05:08.000Z
import torch def rmspe_loss(targets: torch.Tensor, predictions: torch.Tensor) -> torch.Tensor: """Root mean square percentage error.""" loss = torch.sqrt(torch.mean(((targets - predictions).float() / targets) ** 2)) return loss def mean_confidence_penalty(probabilities: torch.Tensor, num_classes: int) -> torch.Tensor: max_entropy = torch.log(torch.tensor(num_classes)) # clipping needed for avoiding log(0) = -inf entropy_per_class = torch.maximum(-probabilities * torch.log(torch.clamp(probabilities, 1e-10, 1)), 0) entropy = torch.sum(entropy_per_class, -1) penalty = (max_entropy - entropy) / max_entropy return torch.mean(penalty) # # used for categorical and sequence features # def sample_values_from_classes( # labels, # sampler, # num_classes, # negative_samples, # unique, # class_counts, # distortion, # ): # """returns sampled_values using the chosen sampler""" # if sampler == "fixed_unigram": # sampled_values = tf.random.fixed_unigram_candidate_sampler( # true_classes=labels, # num_true=1, # num_sampled=negative_samples, # unique=unique, # range_max=num_classes, # unigrams=class_counts, # distortion=distortion, # ) # elif sampler == "uniform": # sampled_values = tf.random.uniform_candidate_sampler( # true_classes=labels, # num_true=1, # num_sampled=negative_samples, # unique=unique, # range_max=num_classes, # ) # elif sampler == "log_uniform": # sampled_values = tf.random.log_uniform_candidate_sampler( # true_classes=labels, # num_true=1, # num_sampled=negative_samples, # unique=unique, # range_max=num_classes, # ) # elif sampler == "learned_unigram": # sampled_values = tf.random.learned_unigram_candidate_sampler( # true_classes=labels, # num_true=1, # num_sampled=negative_samples, # unique=unique, # range_max=num_classes, # ) # else: # raise ValueError("Unsupported sampler {}".format(sampler)) # return sampled_values # # # For categorical feature # def sampled_softmax_cross_entropy( # labels, # last_hidden, # num_classes=1, # decoder_weights=None, # 
decoder_biases=None, # sampler=None, # negative_samples=0, # class_counts=0, # distortion=1, # unique=False, # **kwargs # ): # labels = tf.cast(tf.expand_dims(labels, -1), tf.int64) # # sampled_values = sample_values_from_classes( # labels, # sampler, # num_classes, # negative_samples, # unique, # class_counts, # distortion, # ) # train_loss = tf.nn.sampled_softmax_loss( # weights=tf.transpose(decoder_weights), # biases=decoder_biases, # labels=labels, # inputs=last_hidden, # num_sampled=negative_samples, # num_classes=num_classes, # sampled_values=sampled_values, # ) # # return train_loss # # # # custom class to support Laplace smoothing of Fixed Unigram candidate sampler # # Required because of zeros returned in the true_expected_count for # # <PAD> and <UNK> tokens in loss['class_counts'] list # class FixedUnigramCandidateSampler( # collections.namedtuple( # "FixedUnigramCandidateSampler", # ( # "sampled_candidates", # "true_expected_count", # "sampled_expected_count", # ), # ) # ): # pass # # # # For sequence feature # def sequence_sampled_softmax_cross_entropy( # targets, train_logits, decoder_weights, decoder_biases, num_classes, **loss # ): # batch_max_targets_sequence_length = tf.shape(targets)[1] # targets_sequence_length = sequence_length_2D(tf.cast(targets, tf.int64)) # batch_max_train_logits_sequence_length = tf.shape(train_logits)[1] # # logits_pad_len = tf.maximum( # 0, # batch_max_targets_sequence_length # - batch_max_train_logits_sequence_length, # ) # targets_pad_len = tf.maximum( # 0, # batch_max_train_logits_sequence_length # - batch_max_targets_sequence_length, # ) # # padded_logits = tf.pad(train_logits, [[0, 0], [0, logits_pad_len], [0, 0]]) # padded_targets = tf.pad(targets, [[0, 0], [0, targets_pad_len]]) # # output_exp = tf.cast(tf.reshape(padded_targets, [-1, 1]), tf.int64) # sampled_values = sample_values_from_classes( # output_exp, # loss["sampler"], # num_classes, # loss["negative_samples"], # loss["unique"], # loss["class_counts"], # 
loss["distortion"], # ) # # if loss["sampler"] == "fixed_unigram": # # regenerate sampled_values structure for specified samplers # # to handle any zero values in true_expected_count tensor # sampled_values = FixedUnigramCandidateSampler( # sampled_values.sampled_candidates, # # add smoothing constant EPSILON to handle any zero values # tf.add(sampled_values.true_expected_count, EPSILON), # sampled_values.sampled_expected_count, # ) # # def _sampled_loss(labels, logits): # labels = tf.cast(labels, tf.int64) # labels = tf.reshape(labels, [-1, 1]) # logits = tf.cast(logits, tf.float32) # # return tf.cast( # tf.nn.sampled_softmax_loss( # weights=tf.transpose(decoder_weights), # biases=decoder_biases, # labels=labels, # inputs=logits, # num_sampled=loss["negative_samples"], # num_classes=num_classes, # sampled_values=sampled_values, # ), # tf.float32, # ) # # train_loss = tfa.seq2seq.sequence_loss( # padded_logits, # padded_targets, # tf.sequence_mask( # targets_sequence_length, # tf.shape(padded_targets)[1], # dtype=tf.float32, # ), # average_across_timesteps=True, # average_across_batch=False, # softmax_loss_function=_sampled_loss, # ) # # return train_loss # #
31.776119
106
0.60811
import torch def rmspe_loss(targets: torch.Tensor, predictions: torch.Tensor) -> torch.Tensor: loss = torch.sqrt(torch.mean(((targets - predictions).float() / targets) ** 2)) return loss def mean_confidence_penalty(probabilities: torch.Tensor, num_classes: int) -> torch.Tensor: max_entropy = torch.log(torch.tensor(num_classes)) entropy_per_class = torch.maximum(-probabilities * torch.log(torch.clamp(probabilities, 1e-10, 1)), 0) entropy = torch.sum(entropy_per_class, -1) penalty = (max_entropy - entropy) / max_entropy return torch.mean(penalty)
true
true
1c3586bf286862eddfc75f853f4893e56deed7fb
6,750
py
Python
robit/core/cron.py
stratusadv/robit
7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27
[ "MIT" ]
null
null
null
robit/core/cron.py
stratusadv/robit
7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27
[ "MIT" ]
1
2021-11-01T18:51:04.000Z
2021-11-01T18:51:04.000Z
robit/core/cron.py
stratusadv/robit
7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27
[ "MIT" ]
null
null
null
import ast from datetime import datetime, timedelta import calendar from robit.core.clock import CREATED_DATE_FORMAT class Cron: def __init__( self, value: str, utc_offset: int = 0, ): self.value = value self.utc_offset = utc_offset cron_list = self.value.split(' ') if len(cron_list) != 5: raise ValueError(f'Cron string {self.value} is not the correct format. Should be 5 elements in a string "* * * * *"') self.minute = CronValue(cron_list[0]) self.hour = CronValue(cron_list[1]) self.day_of_month = CronValue(cron_list[2]) self.month = CronValue(cron_list[3]) self.day_of_week = CronValue(cron_list[4]) self.next_datetime = None self.set_next_datetime() def as_dict(self): return { 'next_run_datetime': self.next_run_datetime_verbose, } def is_past_next_datetime(self): if (datetime.utcnow().replace(second=0, microsecond=0) + timedelta(hours=self.utc_offset)) >= self.next_datetime: self.set_next_datetime() return True else: return False def is_past_next_run_datetime(self): if self.is_past_next_datetime(): return True else: return False @property def next_run_datetime_verbose(self): return self.next_datetime.strftime(CREATED_DATE_FORMAT) def set_next_datetime(self): ndt = datetime.utcnow().replace(second=0, microsecond=0) + timedelta(hours=self.utc_offset) now = datetime.utcnow().replace(second=0, microsecond=0) + timedelta(hours=self.utc_offset) # Minute if self.minute.function == 'every': ndt += timedelta(minutes=1) elif self.minute.function == 'specific': ndt = ndt.replace(minute=self.minute.specific) if now.minute >= ndt.minute: ndt += timedelta(hours=1) elif self.minute.function == 'step': count_step_list = [0] count = self.minute.step while count < 60: count_step_list.append(count) count += self.minute.step for count_step in count_step_list: if count_step > now.minute: ndt = ndt.replace(minute=count_step) break else: ndt = ndt.replace(minute=0) ndt += timedelta(hours=1) # Hour if self.hour.function == 'every': pass if self.hour.function == 'specific': ndt = 
ndt.replace(hour=self.hour.specific) if ndt.hour == self.hour.specific: if self.minute.function == 'specific': if now.minute >= ndt.minute: ndt += timedelta(days=1) if ndt.hour > self.hour.specific: ndt += timedelta(days=1) elif self.hour.function == 'step': count_step_list = [0] count = self.hour.step while count < 24: count_step_list.append(count) count += self.hour.step for count_step in count_step_list: if count_step > now.hour: ndt = ndt.replace(hour=count_step) break else: ndt = ndt.replace(hour=0) ndt += timedelta(days=1) # Day of Month if self.day_of_month.function == 'every': pass elif self.day_of_month.function == 'specific': ndt = ndt.replace(day=self.day_of_month.specific) if ndt.month == 12: next_month = 1 else: next_month = ndt.month + 1 if ndt.day == self.day_of_month.specific: if self.hour.function == 'specific': if now.hour >= ndt.hour: ndt = ndt.replace(month=next_month) if ndt.day > self.day_of_month.specific: ndt = ndt.replace(month=next_month) # Month if self.month.function == 'every': pass elif self.month.function == 'specific': ndt = ndt.replace(month=self.month.specific) if ndt.month == self.month.specific: if self.day_of_month.function == 'specific': if now.day >= ndt.day: ndt = ndt.replace(year=(ndt.year + 1)) if now.month > ndt.month: ndt = ndt.replace(year=(ndt.year + 1)) # Day of Week if self.day_of_week.function == 'every': pass if self.day_of_week.function == 'specific': if self.day_of_week.specific == 0: self.day_of_week.specific = 7 if ndt.isoweekday() == self.day_of_week.specific: pass # print(f'{now.isoweekday() = } {ndt.isoweekday() = } {self.day_of_week.specific = }') # if self.hour.function == 'specific': # if now.hour >= ndt.hour: # ndt += timedelta(days=7) elif ndt.isoweekday() > self.day_of_week.specific: ndt += timedelta(days=(7 - (ndt.isoweekday() - self.day_of_week.specific))) else: ndt += timedelta(days=(self.day_of_week.specific - ndt.isoweekday())) self.next_datetime = ndt class CronValue: def __init__(self, value: 
str, ): self.value = value FUNCTION_CHOICES = ('every', 'specific', 'range', 'step') self.function = None self.specific = None self.range_start = None self.range_stop = None self.step_start = None self.step = None self.process() def process(self): range_list = self.value.split('-') value_error = 'Invalid cron string used.' if len(range_list) == 2: self.function = 'range' self.range_start = int(range_list[0]) self.range_stop = int(range_list[1]) elif len(step_list := self.value.split('/')) == 2: if step_list[0] == '*': self.function = 'step' self.step_start = step_list[0] self.step = int(step_list[1]) else: raise ValueError(value_error) else: if range_list[0] == '*': self.function = 'every' elif isinstance(ast.literal_eval(range_list[0]), int): self.function = 'specific' self.specific = int(range_list[0]) else: raise ValueError(value_error)
30.542986
129
0.531556
import ast from datetime import datetime, timedelta import calendar from robit.core.clock import CREATED_DATE_FORMAT class Cron: def __init__( self, value: str, utc_offset: int = 0, ): self.value = value self.utc_offset = utc_offset cron_list = self.value.split(' ') if len(cron_list) != 5: raise ValueError(f'Cron string {self.value} is not the correct format. Should be 5 elements in a string "* * * * *"') self.minute = CronValue(cron_list[0]) self.hour = CronValue(cron_list[1]) self.day_of_month = CronValue(cron_list[2]) self.month = CronValue(cron_list[3]) self.day_of_week = CronValue(cron_list[4]) self.next_datetime = None self.set_next_datetime() def as_dict(self): return { 'next_run_datetime': self.next_run_datetime_verbose, } def is_past_next_datetime(self): if (datetime.utcnow().replace(second=0, microsecond=0) + timedelta(hours=self.utc_offset)) >= self.next_datetime: self.set_next_datetime() return True else: return False def is_past_next_run_datetime(self): if self.is_past_next_datetime(): return True else: return False @property def next_run_datetime_verbose(self): return self.next_datetime.strftime(CREATED_DATE_FORMAT) def set_next_datetime(self): ndt = datetime.utcnow().replace(second=0, microsecond=0) + timedelta(hours=self.utc_offset) now = datetime.utcnow().replace(second=0, microsecond=0) + timedelta(hours=self.utc_offset) if self.minute.function == 'every': ndt += timedelta(minutes=1) elif self.minute.function == 'specific': ndt = ndt.replace(minute=self.minute.specific) if now.minute >= ndt.minute: ndt += timedelta(hours=1) elif self.minute.function == 'step': count_step_list = [0] count = self.minute.step while count < 60: count_step_list.append(count) count += self.minute.step for count_step in count_step_list: if count_step > now.minute: ndt = ndt.replace(minute=count_step) break else: ndt = ndt.replace(minute=0) ndt += timedelta(hours=1) if self.hour.function == 'every': pass if self.hour.function == 'specific': ndt = 
ndt.replace(hour=self.hour.specific) if ndt.hour == self.hour.specific: if self.minute.function == 'specific': if now.minute >= ndt.minute: ndt += timedelta(days=1) if ndt.hour > self.hour.specific: ndt += timedelta(days=1) elif self.hour.function == 'step': count_step_list = [0] count = self.hour.step while count < 24: count_step_list.append(count) count += self.hour.step for count_step in count_step_list: if count_step > now.hour: ndt = ndt.replace(hour=count_step) break else: ndt = ndt.replace(hour=0) ndt += timedelta(days=1) if self.day_of_month.function == 'every': pass elif self.day_of_month.function == 'specific': ndt = ndt.replace(day=self.day_of_month.specific) if ndt.month == 12: next_month = 1 else: next_month = ndt.month + 1 if ndt.day == self.day_of_month.specific: if self.hour.function == 'specific': if now.hour >= ndt.hour: ndt = ndt.replace(month=next_month) if ndt.day > self.day_of_month.specific: ndt = ndt.replace(month=next_month) if self.month.function == 'every': pass elif self.month.function == 'specific': ndt = ndt.replace(month=self.month.specific) if ndt.month == self.month.specific: if self.day_of_month.function == 'specific': if now.day >= ndt.day: ndt = ndt.replace(year=(ndt.year + 1)) if now.month > ndt.month: ndt = ndt.replace(year=(ndt.year + 1)) if self.day_of_week.function == 'every': pass if self.day_of_week.function == 'specific': if self.day_of_week.specific == 0: self.day_of_week.specific = 7 if ndt.isoweekday() == self.day_of_week.specific: pass elif ndt.isoweekday() > self.day_of_week.specific: ndt += timedelta(days=(7 - (ndt.isoweekday() - self.day_of_week.specific))) else: ndt += timedelta(days=(self.day_of_week.specific - ndt.isoweekday())) self.next_datetime = ndt class CronValue: def __init__(self, value: str, ): self.value = value FUNCTION_CHOICES = ('every', 'specific', 'range', 'step') self.function = None self.specific = None self.range_start = None self.range_stop = None self.step_start = None self.step = None 
self.process() def process(self): range_list = self.value.split('-') value_error = 'Invalid cron string used.' if len(range_list) == 2: self.function = 'range' self.range_start = int(range_list[0]) self.range_stop = int(range_list[1]) elif len(step_list := self.value.split('/')) == 2: if step_list[0] == '*': self.function = 'step' self.step_start = step_list[0] self.step = int(step_list[1]) else: raise ValueError(value_error) else: if range_list[0] == '*': self.function = 'every' elif isinstance(ast.literal_eval(range_list[0]), int): self.function = 'specific' self.specific = int(range_list[0]) else: raise ValueError(value_error)
true
true
1c35884aea0a87075d7d5594df270a3e982a4767
738
py
Python
cancertype_prediction/plot_param_opt.py
DPBayes/dp-representation-transfer
0c8389cc36659a7606bceac2531eaef7663ac49c
[ "MIT" ]
1
2021-12-09T03:45:18.000Z
2021-12-09T03:45:18.000Z
cancertype_prediction/plot_param_opt.py
DPBayes/dp-representation-transfer
0c8389cc36659a7606bceac2531eaef7663ac49c
[ "MIT" ]
1
2019-10-03T19:32:29.000Z
2019-10-04T14:09:28.000Z
cancertype_prediction/plot_param_opt.py
DPBayes/dp-representation-transfer
0c8389cc36659a7606bceac2531eaef7663ac49c
[ "MIT" ]
1
2019-10-15T07:16:42.000Z
2019-10-15T07:16:42.000Z
import numpy as np import matplotlib.pyplot as plt x = np.load("param_opt/opt_params.npy") y = np.load("param_opt/opt_results.npy") domain = [ {'name': 'learning_rate_log10', 'type': 'continuous', 'domain': (-5,-1)}, {'name': 'n_hidden_layers', 'type': 'discrete', 'domain': [1, 2, 3]}, # {'name': 'repr_dim', 'type': 'continuous', 'domain': (1, 10)}, {'name': 'repr_dim', 'type': 'discrete', 'domain': [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16]}, {'name': 'hidden_layer_size_mul_log10', 'type': 'continuous', 'domain': (0,4)}, ] assert x.shape[1] == len(domain) (fig, ax) = plt.subplots(1, len(domain), sharey=True) for i in range(len(domain)): ax[i].scatter(x[:,i], y) ax[i].set_xlabel(domain[i]['name']) plt.show()
29.52
95
0.607046
import numpy as np import matplotlib.pyplot as plt x = np.load("param_opt/opt_params.npy") y = np.load("param_opt/opt_results.npy") domain = [ {'name': 'learning_rate_log10', 'type': 'continuous', 'domain': (-5,-1)}, {'name': 'n_hidden_layers', 'type': 'discrete', 'domain': [1, 2, 3]}, {'name': 'repr_dim', 'type': 'discrete', 'domain': [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16]}, {'name': 'hidden_layer_size_mul_log10', 'type': 'continuous', 'domain': (0,4)}, ] assert x.shape[1] == len(domain) (fig, ax) = plt.subplots(1, len(domain), sharey=True) for i in range(len(domain)): ax[i].scatter(x[:,i], y) ax[i].set_xlabel(domain[i]['name']) plt.show()
true
true
1c3589620f3ae4c5d749e81bd94ab6a5f97a9f58
551
py
Python
aiotf/__init__.py
brunoalano/aiotf
78a6b67694c4c3245f14003bd3d3f0450ac9cad2
[ "MIT" ]
8
2018-11-28T12:13:25.000Z
2021-11-08T11:19:01.000Z
aiotf/__init__.py
brunoalano/aiotf
78a6b67694c4c3245f14003bd3d3f0450ac9cad2
[ "MIT" ]
null
null
null
aiotf/__init__.py
brunoalano/aiotf
78a6b67694c4c3245f14003bd3d3f0450ac9cad2
[ "MIT" ]
null
null
null
from .__version__ import __title__, __description__, __url__, __version__ from .__version__ import __author__, __author_email__, __license__ from .__version__ import __copyright__ # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass from .client import AsyncTensorflowServing __all__ = ['AsyncTensorflowServing'] logging.getLogger(__name__).addHandler(NullHandler())
30.611111
73
0.798548
from .__version__ import __title__, __description__, __url__, __version__ from .__version__ import __author__, __author_email__, __license__ from .__version__ import __copyright__ import logging try: from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass from .client import AsyncTensorflowServing __all__ = ['AsyncTensorflowServing'] logging.getLogger(__name__).addHandler(NullHandler())
true
true
1c358a6e7c8cd62a3bef6015d49d8d740a1f2af8
66
py
Python
caching/__init__.py
remohammadi/django-cache-machine
da5f6edf1600a28cb6ad7f9f7ad8104f1748e674
[ "BSD-3-Clause" ]
1
2019-02-10T19:33:37.000Z
2019-02-10T19:33:37.000Z
caching/__init__.py
remohammadi/django-cache-machine
da5f6edf1600a28cb6ad7f9f7ad8104f1748e674
[ "BSD-3-Clause" ]
null
null
null
caching/__init__.py
remohammadi/django-cache-machine
da5f6edf1600a28cb6ad7f9f7ad8104f1748e674
[ "BSD-3-Clause" ]
null
null
null
VERSION = (0, '8', '1') __version__ = '.'.join(map(str, VERSION))
22
41
0.575758
VERSION = (0, '8', '1') __version__ = '.'.join(map(str, VERSION))
true
true
1c358bac3573d104c1bedc36acc256cb7d1a0465
423
py
Python
setup.py
human-ui/gym-minigrid
66a89abf236030b8a9d84a51f62acbc7605e01cd
[ "BSD-3-Clause" ]
null
null
null
setup.py
human-ui/gym-minigrid
66a89abf236030b8a9d84a51f62acbc7605e01cd
[ "BSD-3-Clause" ]
1
2020-01-06T08:49:05.000Z
2020-01-07T08:31:11.000Z
setup.py
human-ui/gym-minigrid
66a89abf236030b8a9d84a51f62acbc7605e01cd
[ "BSD-3-Clause" ]
null
null
null
from setuptools import setup setup( name='gym_minigrid', version='0.0.5', keywords='memory, environment, agent, rl, openaigym, openai-gym, gym', url='https://github.com/maximecb/gym-minigrid', description='Minimalistic gridworld package for OpenAI Gym', packages=['gym_minigrid'], install_requires=[ 'gym>=0.9.6', 'numpy>=1.15.0', 'pyqt5>=5.10.1', 'sty' ] )
24.882353
74
0.612293
from setuptools import setup setup( name='gym_minigrid', version='0.0.5', keywords='memory, environment, agent, rl, openaigym, openai-gym, gym', url='https://github.com/maximecb/gym-minigrid', description='Minimalistic gridworld package for OpenAI Gym', packages=['gym_minigrid'], install_requires=[ 'gym>=0.9.6', 'numpy>=1.15.0', 'pyqt5>=5.10.1', 'sty' ] )
true
true
1c358c09ae78dec70f326486988c762b5b1893f8
17,071
py
Python
PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/tests/test_colorbar.py
AbdulHoffmann/carla_carissma
8d382769ffa02a6c61a22c57160285505f5ff0a4
[ "MIT" ]
366
2019-04-07T20:34:48.000Z
2022-03-29T07:35:38.000Z
venv/lib/python3.7/site-packages/matplotlib/tests/test_colorbar.py
John1001Song/Big-Data-Robo-Adviser
9444dce96954c546333d5aecc92a06c3bfd19aa5
[ "MIT" ]
12
2018-12-06T22:06:49.000Z
2022-02-25T17:40:44.000Z
venv/lib/python3.7/site-packages/matplotlib/tests/test_colorbar.py
John1001Song/Big-Data-Robo-Adviser
9444dce96954c546333d5aecc92a06c3bfd19aa5
[ "MIT" ]
64
2018-04-25T08:51:57.000Z
2022-01-29T14:13:57.000Z
import numpy as np import pytest from matplotlib import rc_context from matplotlib.testing.decorators import image_comparison import matplotlib.pyplot as plt from matplotlib.colors import BoundaryNorm, LogNorm, PowerNorm from matplotlib.cm import get_cmap from matplotlib.colorbar import ColorbarBase from matplotlib.ticker import LogLocator, LogFormatter def _get_cmap_norms(): """ Define a colormap and appropriate norms for each of the four possible settings of the extend keyword. Helper function for _colorbar_extension_shape and colorbar_extension_length. """ # Create a color map and specify the levels it represents. cmap = get_cmap("RdBu", lut=5) clevs = [-5., -2.5, -.5, .5, 1.5, 3.5] # Define norms for the color maps. norms = dict() norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1) norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1) norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1) norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1) return cmap, norms def _colorbar_extension_shape(spacing): ''' Produce 4 colorbars with rectangular extensions for either uniform or proportional spacing. Helper function for test_colorbar_extension_shape. ''' # Get a colormap and appropriate norms for each extension type. cmap, norms = _get_cmap_norms() # Create a figure and adjust whitespace for subplots. fig = plt.figure() fig.subplots_adjust(hspace=4) for i, extension_type in enumerate(('neither', 'min', 'max', 'both')): # Get the appropriate norm and use it to get colorbar boundaries. norm = norms[extension_type] boundaries = values = norm.boundaries # Create a subplot. cax = fig.add_subplot(4, 1, i + 1) # Generate the colorbar. cb = ColorbarBase(cax, cmap=cmap, norm=norm, boundaries=boundaries, values=values, extend=extension_type, extendrect=True, orientation='horizontal', spacing=spacing) # Turn off text and ticks. cax.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False) # Return the figure to the caller. 
return fig def _colorbar_extension_length(spacing): ''' Produce 12 colorbars with variable length extensions for either uniform or proportional spacing. Helper function for test_colorbar_extension_length. ''' # Get a colormap and appropriate norms for each extension type. cmap, norms = _get_cmap_norms() # Create a figure and adjust whitespace for subplots. fig = plt.figure() fig.subplots_adjust(hspace=.6) for i, extension_type in enumerate(('neither', 'min', 'max', 'both')): # Get the appropriate norm and use it to get colorbar boundaries. norm = norms[extension_type] boundaries = values = norm.boundaries for j, extendfrac in enumerate((None, 'auto', 0.1)): # Create a subplot. cax = fig.add_subplot(12, 1, i*3 + j + 1) # Generate the colorbar. ColorbarBase(cax, cmap=cmap, norm=norm, boundaries=boundaries, values=values, extend=extension_type, extendfrac=extendfrac, orientation='horizontal', spacing=spacing) # Turn off text and ticks. cax.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False) # Return the figure to the caller. return fig @image_comparison( baseline_images=['colorbar_extensions_shape_uniform', 'colorbar_extensions_shape_proportional'], extensions=['png']) def test_colorbar_extension_shape(): '''Test rectangular colorbar extensions.''' # Create figures for uniform and proportionally spaced colorbars. _colorbar_extension_shape('uniform') _colorbar_extension_shape('proportional') @image_comparison(baseline_images=['colorbar_extensions_uniform', 'colorbar_extensions_proportional'], extensions=['png']) def test_colorbar_extension_length(): '''Test variable length colorbar extensions.''' # Create figures for uniform and proportionally spaced colorbars. 
_colorbar_extension_length('uniform') _colorbar_extension_length('proportional') @image_comparison(baseline_images=['cbar_with_orientation', 'cbar_locationing', 'double_cbar', 'cbar_sharing', ], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40}) def test_colorbar_positioning(): data = np.arange(1200).reshape(30, 40) levels = [0, 200, 400, 600, 800, 1000, 1200] # ------------------- plt.figure() plt.contourf(data, levels=levels) plt.colorbar(orientation='horizontal', use_gridspec=False) locations = ['left', 'right', 'top', 'bottom'] plt.figure() for i, location in enumerate(locations): plt.subplot(2, 2, i + 1) plt.contourf(data, levels=levels) plt.colorbar(location=location, use_gridspec=False) # ------------------- plt.figure() # make some other data (random integers) data_2nd = np.array([[2, 3, 2, 3], [1.5, 2, 2, 3], [2, 3, 3, 4]]) # make the random data expand to the shape of the main data data_2nd = np.repeat(np.repeat(data_2nd, 10, axis=1), 10, axis=0) color_mappable = plt.contourf(data, levels=levels, extend='both') # test extend frac here hatch_mappable = plt.contourf(data_2nd, levels=[1, 2, 3], colors='none', hatches=['/', 'o', '+'], extend='max') plt.contour(hatch_mappable, colors='black') plt.colorbar(color_mappable, location='left', label='variable 1', use_gridspec=False) plt.colorbar(hatch_mappable, location='right', label='variable 2', use_gridspec=False) # ------------------- plt.figure() ax1 = plt.subplot(211, anchor='NE', aspect='equal') plt.contourf(data, levels=levels) ax2 = plt.subplot(223) plt.contourf(data, levels=levels) ax3 = plt.subplot(224) plt.contourf(data, levels=levels) plt.colorbar(ax=[ax2, ax3, ax1], location='right', pad=0.0, shrink=0.5, panchor=False, use_gridspec=False) plt.colorbar(ax=[ax2, ax3, ax1], location='left', shrink=0.5, panchor=False, use_gridspec=False) plt.colorbar(ax=[ax1], location='bottom', panchor=False, anchor=(0.8, 0.5), shrink=0.6, use_gridspec=False) 
@image_comparison(baseline_images=['cbar_with_subplots_adjust'], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40}) def test_gridspec_make_colorbar(): plt.figure() data = np.arange(1200).reshape(30, 40) levels = [0, 200, 400, 600, 800, 1000, 1200] plt.subplot(121) plt.contourf(data, levels=levels) plt.colorbar(use_gridspec=True, orientation='vertical') plt.subplot(122) plt.contourf(data, levels=levels) plt.colorbar(use_gridspec=True, orientation='horizontal') plt.subplots_adjust(top=0.95, right=0.95, bottom=0.2, hspace=0.25) @image_comparison(baseline_images=['colorbar_single_scatter'], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40}) def test_colorbar_single_scatter(): # Issue #2642: if a path collection has only one entry, # the norm scaling within the colorbar must ensure a # finite range, otherwise a zero denominator will occur in _locate. plt.figure() x = np.arange(4) y = x.copy() z = np.ma.masked_greater(np.arange(50, 54), 50) cmap = plt.get_cmap('jet', 16) cs = plt.scatter(x, y, z, c=z, cmap=cmap) plt.colorbar(cs) @pytest.mark.parametrize('use_gridspec', [False, True], ids=['no gridspec', 'with gridspec']) def test_remove_from_figure(use_gridspec): """ Test `remove_from_figure` with the specified ``use_gridspec`` setting """ fig, ax = plt.subplots() sc = ax.scatter([1, 2], [3, 4], cmap="spring") sc.set_array(np.array([5, 6])) pre_figbox = np.array(ax.figbox) cb = fig.colorbar(sc, use_gridspec=use_gridspec) fig.subplots_adjust() cb.remove() fig.subplots_adjust() post_figbox = np.array(ax.figbox) assert (pre_figbox == post_figbox).all() def test_colorbarbase(): # smoke test from #3805 ax = plt.gca() ColorbarBase(ax, plt.cm.bone) @image_comparison( baseline_images=['colorbar_closed_patch'], remove_text=True) def test_colorbar_closed_patch(): fig = plt.figure(figsize=(8, 6)) ax1 = fig.add_axes([0.05, 0.85, 0.9, 0.1]) ax2 = fig.add_axes([0.1, 0.65, 0.75, 0.1]) ax3 = fig.add_axes([0.05, 0.45, 0.9, 0.1]) ax4 = fig.add_axes([0.05, 0.25, 
0.9, 0.1]) ax5 = fig.add_axes([0.05, 0.05, 0.9, 0.1]) cmap = get_cmap("RdBu", lut=5) im = ax1.pcolormesh(np.linspace(0, 10, 16).reshape((4, 4)), cmap=cmap) values = np.linspace(0, 10, 5) with rc_context({'axes.linewidth': 16}): plt.colorbar(im, cax=ax2, cmap=cmap, orientation='horizontal', extend='both', extendfrac=0.5, values=values) plt.colorbar(im, cax=ax3, cmap=cmap, orientation='horizontal', extend='both', values=values) plt.colorbar(im, cax=ax4, cmap=cmap, orientation='horizontal', extend='both', extendrect=True, values=values) plt.colorbar(im, cax=ax5, cmap=cmap, orientation='horizontal', extend='neither', values=values) def test_colorbar_ticks(): # test fix for #5673 fig, ax = plt.subplots() x = np.arange(-3.0, 4.001) y = np.arange(-4.0, 3.001) X, Y = np.meshgrid(x, y) Z = X * Y clevs = np.array([-12, -5, 0, 5, 12], dtype=float) colors = ['r', 'g', 'b', 'c'] cs = ax.contourf(X, Y, Z, clevs, colors=colors) cbar = fig.colorbar(cs, ax=ax, extend='neither', orientation='horizontal', ticks=clevs) assert len(cbar.ax.xaxis.get_ticklocs()) == len(clevs) def test_colorbar_minorticks_on_off(): # test for github issue #11510 and PR #11584 np.random.seed(seed=12345) data = np.random.randn(20, 20) with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots() # purposefully setting vmin and vmax to odd fractions # so as to check for the correct locations of the minor ticks im = ax.pcolormesh(data, vmin=-2.3, vmax=3.3) cbar = fig.colorbar(im, extend='both') cbar.minorticks_on() correct_minorticklocs = np.array([-2.2, -1.8, -1.6, -1.4, -1.2, -0.8, -0.6, -0.4, -0.2, 0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6, 1.8, 2.2, 2.4, 2.6, 2.8, 3.2]) # testing after minorticks_on() np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), correct_minorticklocs) cbar.minorticks_off() # testing after minorticks_off() np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), np.array([])) im.set_clim(vmin=-1.2, vmax=1.2) cbar.minorticks_on() correct_minorticklocs = 
np.array([-1.2, -1.1, -0.9, -0.8, -0.7, -0.6, -0.4, -0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 1.1, 1.2]) np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), correct_minorticklocs) def test_colorbar_autoticks(): # Test new autotick modes. Needs to be classic because # non-classic doesn't go this route. with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots(2, 1) x = np.arange(-3.0, 4.001) y = np.arange(-4.0, 3.001) X, Y = np.meshgrid(x, y) Z = X * Y pcm = ax[0].pcolormesh(X, Y, Z) cbar = fig.colorbar(pcm, ax=ax[0], extend='both', orientation='vertical') pcm = ax[1].pcolormesh(X, Y, Z) cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both', orientation='vertical', shrink=0.4) np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(), np.arange(-10, 11., 5.)) np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(), np.arange(-10, 11., 10.)) def test_colorbar_autotickslog(): # Test new autotick modes... with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots(2, 1) x = np.arange(-3.0, 4.001) y = np.arange(-4.0, 3.001) X, Y = np.meshgrid(x, y) Z = X * Y pcm = ax[0].pcolormesh(X, Y, 10**Z, norm=LogNorm()) cbar = fig.colorbar(pcm, ax=ax[0], extend='both', orientation='vertical') pcm = ax[1].pcolormesh(X, Y, 10**Z, norm=LogNorm()) cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both', orientation='vertical', shrink=0.4) np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(), 10**np.arange(-12, 12.2, 4.)) np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(), 10**np.arange(-12, 13., 12.)) def test_colorbar_get_ticks(): # test feature for #5792 plt.figure() data = np.arange(1200).reshape(30, 40) levels = [0, 200, 400, 600, 800, 1000, 1200] plt.subplot() plt.contourf(data, levels=levels) # testing getter for user set ticks userTicks = plt.colorbar(ticks=[0, 600, 1200]) assert userTicks.get_ticks().tolist() == [0, 600, 1200] # testing for getter after calling set_ticks userTicks.set_ticks([600, 700, 
800]) assert userTicks.get_ticks().tolist() == [600, 700, 800] # testing for getter after calling set_ticks with some ticks out of bounds userTicks.set_ticks([600, 1300, 1400, 1500]) assert userTicks.get_ticks().tolist() == [600] # testing getter when no ticks are assigned defTicks = plt.colorbar(orientation='horizontal') assert defTicks.get_ticks().tolist() == levels def test_colorbar_lognorm_extension(): # Test that colorbar with lognorm is extended correctly f, ax = plt.subplots() cb = ColorbarBase(ax, norm=LogNorm(vmin=0.1, vmax=1000.0), orientation='vertical', extend='both') assert cb._values[0] >= 0.0 def test_colorbar_powernorm_extension(): # Test that colorbar with powernorm is extended correctly f, ax = plt.subplots() cb = ColorbarBase(ax, norm=PowerNorm(gamma=0.5, vmin=0.0, vmax=1.0), orientation='vertical', extend='both') assert cb._values[0] >= 0.0 def test_colorbar_axes_kw(): # test fix for #8493: This does only test, that axes-related keywords pass # and do not raise an exception. 
plt.figure() plt.imshow(([[1, 2], [3, 4]])) plt.colorbar(orientation='horizontal', fraction=0.2, pad=0.2, shrink=0.5, aspect=10, anchor=(0., 0.), panchor=(0., 1.)) def test_colorbar_log_minortick_labels(): with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots() pcm = ax.imshow([[10000, 50000]], norm=LogNorm()) cb = fig.colorbar(pcm) fig.canvas.draw() lb = cb.ax.yaxis.get_ticklabels(which='both') expected = [r'$\mathdefault{10^{4}}$', r'$\mathdefault{2\times10^{4}}$', r'$\mathdefault{3\times10^{4}}$', r'$\mathdefault{4\times10^{4}}$'] for l, exp in zip(lb, expected): assert l.get_text() == exp def test_colorbar_renorm(): x, y = np.ogrid[-4:4:31j, -4:4:31j] z = 120000*np.exp(-x**2 - y**2) fig, ax = plt.subplots() im = ax.imshow(z) cbar = fig.colorbar(im) norm = LogNorm(z.min(), z.max()) im.set_norm(norm) cbar.set_norm(norm) cbar.locator = LogLocator() cbar.formatter = LogFormatter() cbar.update_normal(im) assert np.isclose(cbar.vmin, z.min()) norm = LogNorm(z.min() * 1000, z.max() * 1000) im.set_norm(norm) cbar.set_norm(norm) cbar.update_normal(im) assert np.isclose(cbar.vmin, z.min() * 1000) assert np.isclose(cbar.vmax, z.max() * 1000) def test_colorbar_get_ticks(): with rc_context({'_internal.classic_mode': False}): fig, ax = plt. subplots() np.random.seed(19680801) pc = ax.pcolormesh(np.random.rand(30, 30)) cb = fig.colorbar(pc) np.testing.assert_allclose(cb.get_ticks(), [0.2, 0.4, 0.6, 0.8])
38.020045
78
0.600785
import numpy as np import pytest from matplotlib import rc_context from matplotlib.testing.decorators import image_comparison import matplotlib.pyplot as plt from matplotlib.colors import BoundaryNorm, LogNorm, PowerNorm from matplotlib.cm import get_cmap from matplotlib.colorbar import ColorbarBase from matplotlib.ticker import LogLocator, LogFormatter def _get_cmap_norms(): cmap = get_cmap("RdBu", lut=5) clevs = [-5., -2.5, -.5, .5, 1.5, 3.5] norms = dict() norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1) norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1) norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1) norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1) return cmap, norms def _colorbar_extension_shape(spacing): cmap, norms = _get_cmap_norms() fig = plt.figure() fig.subplots_adjust(hspace=4) for i, extension_type in enumerate(('neither', 'min', 'max', 'both')): norm = norms[extension_type] boundaries = values = norm.boundaries cax = fig.add_subplot(4, 1, i + 1) cb = ColorbarBase(cax, cmap=cmap, norm=norm, boundaries=boundaries, values=values, extend=extension_type, extendrect=True, orientation='horizontal', spacing=spacing) cax.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False) return fig def _colorbar_extension_length(spacing): cmap, norms = _get_cmap_norms() fig = plt.figure() fig.subplots_adjust(hspace=.6) for i, extension_type in enumerate(('neither', 'min', 'max', 'both')): norm = norms[extension_type] boundaries = values = norm.boundaries for j, extendfrac in enumerate((None, 'auto', 0.1)): cax = fig.add_subplot(12, 1, i*3 + j + 1) ColorbarBase(cax, cmap=cmap, norm=norm, boundaries=boundaries, values=values, extend=extension_type, extendfrac=extendfrac, orientation='horizontal', spacing=spacing) cax.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False) return fig @image_comparison( baseline_images=['colorbar_extensions_shape_uniform', 
'colorbar_extensions_shape_proportional'], extensions=['png']) def test_colorbar_extension_shape(): _colorbar_extension_shape('uniform') _colorbar_extension_shape('proportional') @image_comparison(baseline_images=['colorbar_extensions_uniform', 'colorbar_extensions_proportional'], extensions=['png']) def test_colorbar_extension_length(): _colorbar_extension_length('uniform') _colorbar_extension_length('proportional') @image_comparison(baseline_images=['cbar_with_orientation', 'cbar_locationing', 'double_cbar', 'cbar_sharing', ], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40}) def test_colorbar_positioning(): data = np.arange(1200).reshape(30, 40) levels = [0, 200, 400, 600, 800, 1000, 1200] plt.figure() plt.contourf(data, levels=levels) plt.colorbar(orientation='horizontal', use_gridspec=False) locations = ['left', 'right', 'top', 'bottom'] plt.figure() for i, location in enumerate(locations): plt.subplot(2, 2, i + 1) plt.contourf(data, levels=levels) plt.colorbar(location=location, use_gridspec=False) plt.figure() data_2nd = np.array([[2, 3, 2, 3], [1.5, 2, 2, 3], [2, 3, 3, 4]]) data_2nd = np.repeat(np.repeat(data_2nd, 10, axis=1), 10, axis=0) color_mappable = plt.contourf(data, levels=levels, extend='both') hatch_mappable = plt.contourf(data_2nd, levels=[1, 2, 3], colors='none', hatches=['/', 'o', '+'], extend='max') plt.contour(hatch_mappable, colors='black') plt.colorbar(color_mappable, location='left', label='variable 1', use_gridspec=False) plt.colorbar(hatch_mappable, location='right', label='variable 2', use_gridspec=False) plt.figure() ax1 = plt.subplot(211, anchor='NE', aspect='equal') plt.contourf(data, levels=levels) ax2 = plt.subplot(223) plt.contourf(data, levels=levels) ax3 = plt.subplot(224) plt.contourf(data, levels=levels) plt.colorbar(ax=[ax2, ax3, ax1], location='right', pad=0.0, shrink=0.5, panchor=False, use_gridspec=False) plt.colorbar(ax=[ax2, ax3, ax1], location='left', shrink=0.5, panchor=False, use_gridspec=False) 
plt.colorbar(ax=[ax1], location='bottom', panchor=False, anchor=(0.8, 0.5), shrink=0.6, use_gridspec=False) @image_comparison(baseline_images=['cbar_with_subplots_adjust'], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40}) def test_gridspec_make_colorbar(): plt.figure() data = np.arange(1200).reshape(30, 40) levels = [0, 200, 400, 600, 800, 1000, 1200] plt.subplot(121) plt.contourf(data, levels=levels) plt.colorbar(use_gridspec=True, orientation='vertical') plt.subplot(122) plt.contourf(data, levels=levels) plt.colorbar(use_gridspec=True, orientation='horizontal') plt.subplots_adjust(top=0.95, right=0.95, bottom=0.2, hspace=0.25) @image_comparison(baseline_images=['colorbar_single_scatter'], extensions=['png'], remove_text=True, savefig_kwarg={'dpi': 40}) def test_colorbar_single_scatter(): ) y = x.copy() z = np.ma.masked_greater(np.arange(50, 54), 50) cmap = plt.get_cmap('jet', 16) cs = plt.scatter(x, y, z, c=z, cmap=cmap) plt.colorbar(cs) @pytest.mark.parametrize('use_gridspec', [False, True], ids=['no gridspec', 'with gridspec']) def test_remove_from_figure(use_gridspec): fig, ax = plt.subplots() sc = ax.scatter([1, 2], [3, 4], cmap="spring") sc.set_array(np.array([5, 6])) pre_figbox = np.array(ax.figbox) cb = fig.colorbar(sc, use_gridspec=use_gridspec) fig.subplots_adjust() cb.remove() fig.subplots_adjust() post_figbox = np.array(ax.figbox) assert (pre_figbox == post_figbox).all() def test_colorbarbase(): ax = plt.gca() ColorbarBase(ax, plt.cm.bone) @image_comparison( baseline_images=['colorbar_closed_patch'], remove_text=True) def test_colorbar_closed_patch(): fig = plt.figure(figsize=(8, 6)) ax1 = fig.add_axes([0.05, 0.85, 0.9, 0.1]) ax2 = fig.add_axes([0.1, 0.65, 0.75, 0.1]) ax3 = fig.add_axes([0.05, 0.45, 0.9, 0.1]) ax4 = fig.add_axes([0.05, 0.25, 0.9, 0.1]) ax5 = fig.add_axes([0.05, 0.05, 0.9, 0.1]) cmap = get_cmap("RdBu", lut=5) im = ax1.pcolormesh(np.linspace(0, 10, 16).reshape((4, 4)), cmap=cmap) values = np.linspace(0, 10, 5) with 
rc_context({'axes.linewidth': 16}): plt.colorbar(im, cax=ax2, cmap=cmap, orientation='horizontal', extend='both', extendfrac=0.5, values=values) plt.colorbar(im, cax=ax3, cmap=cmap, orientation='horizontal', extend='both', values=values) plt.colorbar(im, cax=ax4, cmap=cmap, orientation='horizontal', extend='both', extendrect=True, values=values) plt.colorbar(im, cax=ax5, cmap=cmap, orientation='horizontal', extend='neither', values=values) def test_colorbar_ticks(): fig, ax = plt.subplots() x = np.arange(-3.0, 4.001) y = np.arange(-4.0, 3.001) X, Y = np.meshgrid(x, y) Z = X * Y clevs = np.array([-12, -5, 0, 5, 12], dtype=float) colors = ['r', 'g', 'b', 'c'] cs = ax.contourf(X, Y, Z, clevs, colors=colors) cbar = fig.colorbar(cs, ax=ax, extend='neither', orientation='horizontal', ticks=clevs) assert len(cbar.ax.xaxis.get_ticklocs()) == len(clevs) def test_colorbar_minorticks_on_off(): 2345) data = np.random.randn(20, 20) with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots() im = ax.pcolormesh(data, vmin=-2.3, vmax=3.3) cbar = fig.colorbar(im, extend='both') cbar.minorticks_on() correct_minorticklocs = np.array([-2.2, -1.8, -1.6, -1.4, -1.2, -0.8, -0.6, -0.4, -0.2, 0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6, 1.8, 2.2, 2.4, 2.6, 2.8, 3.2]) np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), correct_minorticklocs) cbar.minorticks_off() np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), np.array([])) im.set_clim(vmin=-1.2, vmax=1.2) cbar.minorticks_on() correct_minorticklocs = np.array([-1.2, -1.1, -0.9, -0.8, -0.7, -0.6, -0.4, -0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 1.1, 1.2]) np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(), correct_minorticklocs) def test_colorbar_autoticks(): with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots(2, 1) x = np.arange(-3.0, 4.001) y = np.arange(-4.0, 3.001) X, Y = np.meshgrid(x, y) Z = X * Y pcm = ax[0].pcolormesh(X, Y, Z) cbar = 
fig.colorbar(pcm, ax=ax[0], extend='both', orientation='vertical') pcm = ax[1].pcolormesh(X, Y, Z) cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both', orientation='vertical', shrink=0.4) np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(), np.arange(-10, 11., 5.)) np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(), np.arange(-10, 11., 10.)) def test_colorbar_autotickslog(): # Test new autotick modes... with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots(2, 1) x = np.arange(-3.0, 4.001) y = np.arange(-4.0, 3.001) X, Y = np.meshgrid(x, y) Z = X * Y pcm = ax[0].pcolormesh(X, Y, 10**Z, norm=LogNorm()) cbar = fig.colorbar(pcm, ax=ax[0], extend='both', orientation='vertical') pcm = ax[1].pcolormesh(X, Y, 10**Z, norm=LogNorm()) cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both', orientation='vertical', shrink=0.4) np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(), 10**np.arange(-12, 12.2, 4.)) np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(), 10**np.arange(-12, 13., 12.)) def test_colorbar_get_ticks(): # test feature for #5792 plt.figure() data = np.arange(1200).reshape(30, 40) levels = [0, 200, 400, 600, 800, 1000, 1200] plt.subplot() plt.contourf(data, levels=levels) # testing getter for user set ticks userTicks = plt.colorbar(ticks=[0, 600, 1200]) assert userTicks.get_ticks().tolist() == [0, 600, 1200] # testing for getter after calling set_ticks userTicks.set_ticks([600, 700, 800]) assert userTicks.get_ticks().tolist() == [600, 700, 800] # testing for getter after calling set_ticks with some ticks out of bounds userTicks.set_ticks([600, 1300, 1400, 1500]) assert userTicks.get_ticks().tolist() == [600] # testing getter when no ticks are assigned defTicks = plt.colorbar(orientation='horizontal') assert defTicks.get_ticks().tolist() == levels def test_colorbar_lognorm_extension(): # Test that colorbar with lognorm is extended correctly f, ax = plt.subplots() cb = ColorbarBase(ax, norm=LogNorm(vmin=0.1, 
vmax=1000.0), orientation='vertical', extend='both') assert cb._values[0] >= 0.0 def test_colorbar_powernorm_extension(): # Test that colorbar with powernorm is extended correctly f, ax = plt.subplots() cb = ColorbarBase(ax, norm=PowerNorm(gamma=0.5, vmin=0.0, vmax=1.0), orientation='vertical', extend='both') assert cb._values[0] >= 0.0 def test_colorbar_axes_kw(): # test fix for #8493: This does only test, that axes-related keywords pass # and do not raise an exception. plt.figure() plt.imshow(([[1, 2], [3, 4]])) plt.colorbar(orientation='horizontal', fraction=0.2, pad=0.2, shrink=0.5, aspect=10, anchor=(0., 0.), panchor=(0., 1.)) def test_colorbar_log_minortick_labels(): with rc_context({'_internal.classic_mode': False}): fig, ax = plt.subplots() pcm = ax.imshow([[10000, 50000]], norm=LogNorm()) cb = fig.colorbar(pcm) fig.canvas.draw() lb = cb.ax.yaxis.get_ticklabels(which='both') expected = [r'$\mathdefault{10^{4}}$', r'$\mathdefault{2\times10^{4}}$', r'$\mathdefault{3\times10^{4}}$', r'$\mathdefault{4\times10^{4}}$'] for l, exp in zip(lb, expected): assert l.get_text() == exp def test_colorbar_renorm(): x, y = np.ogrid[-4:4:31j, -4:4:31j] z = 120000*np.exp(-x**2 - y**2) fig, ax = plt.subplots() im = ax.imshow(z) cbar = fig.colorbar(im) norm = LogNorm(z.min(), z.max()) im.set_norm(norm) cbar.set_norm(norm) cbar.locator = LogLocator() cbar.formatter = LogFormatter() cbar.update_normal(im) assert np.isclose(cbar.vmin, z.min()) norm = LogNorm(z.min() * 1000, z.max() * 1000) im.set_norm(norm) cbar.set_norm(norm) cbar.update_normal(im) assert np.isclose(cbar.vmin, z.min() * 1000) assert np.isclose(cbar.vmax, z.max() * 1000) def test_colorbar_get_ticks(): with rc_context({'_internal.classic_mode': False}): fig, ax = plt. subplots() np.random.seed(19680801) pc = ax.pcolormesh(np.random.rand(30, 30)) cb = fig.colorbar(pc) np.testing.assert_allclose(cb.get_ticks(), [0.2, 0.4, 0.6, 0.8])
true
true
1c358c3ca06193a459088b7dc742ecfcfbfc9389
4,118
py
Python
tests/graph/test_stpg_heuristics.py
GiliardGodoi/steiner-problem-with-evol
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
[ "MIT" ]
null
null
null
tests/graph/test_stpg_heuristics.py
GiliardGodoi/steiner-problem-with-evol
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
[ "MIT" ]
5
2021-01-26T17:28:32.000Z
2021-03-14T13:46:48.000Z
tests/graph/test_stpg_heuristics.py
GiliardGodoi/steiner-problem-with-evol
6b34f0342b791ae6c65b6d016c37a4d45ab5cdad
[ "MIT" ]
1
2021-01-25T16:35:59.000Z
2021-01-25T16:35:59.000Z
import random import unittest from collections import deque from os import path from ga4stpg.graph.graph import UndirectedWeightedGraph as Graph from ga4stpg.graph.reader import ReaderORLibrary from ga4stpg.graph.steiner import (prunning_mst, shortest_path, shortest_path_origin_prim, shortest_path_with_origin) class TestSTPGHeuristicas(unittest.TestCase): def setUp(self): reader = ReaderORLibrary() self.stpg_instance = reader.parser(path.join("datasets", "ORLibrary", "steinb13.txt")) self.graph = self.stpg_instance.graph self.terminals = list(self.stpg_instance.terminals) random.seed() def test_instance_reading(self): stpg = self.stpg_instance self.assertEqual(stpg.nro_edges, 125) self.assertEqual(stpg.nro_nodes, 100) self.assertEqual(stpg.nro_terminals, 17) self.assertEqual(stpg.nro_terminals, len(stpg.terminals)) self.assertEqual(stpg.nro_nodes, len(stpg.graph.vertices)) def test_shortest_path(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = shortest_path(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def test_shortest_path_with_origin(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = shortest_path_with_origin(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def test_shortest_path_origin_prim(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = shortest_path_origin_prim(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def test_prunning_mst(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = prunning_mst(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def common_cases(self, steiner_tree : Graph, cost : int): self.assertIsInstance(steiner_tree, Graph) self.assertIsInstance(cost, int) self.assertGreater(cost,0) terminals = self.stpg_instance.terminals ## Se o vértice possui grau 1 então é terminal. 
Mas se for terminal possui grau 1? degrees = { k : len(steiner_tree[k]) for k in steiner_tree.edges.keys() } for k, v in degrees.items() : if v == 1 : is_terminal = (k in terminals) self.assertTrue(is_terminal) all_vertices = set(steiner_tree.vertices) self.assertIsInstance(all_vertices, set) self.assertEqual(len(all_vertices), len(steiner_tree.vertices)) ## todos os vertices terminais estao contidos na solução tt = terminals - all_vertices self.assertFalse(tt) ## Existe algum ponto de steiner na solução. Mas sempre isso vai acontecer? ss = all_vertices - terminals self.assertTrue(ss) stpg = self.stpg_instance has_cycles = self.check_cycles_dfs(steiner_tree, self.terminals[8]) self.assertFalse(has_cycles) def check_cycles_dfs(self, graph, start): ''' Verifica se existe um ciclo em um grafo a partir de um vértice. É claro, essa função não foi testada. ''' stack = deque() visited = set([start]) prev = dict() stack.append(start) has_circle = False while stack: v = stack.pop() visited.add(v) for u in graph.adjacent_to(v): if u not in visited : stack.append(u) prev[u] = v elif not prev[v] == u : has_circle = True return has_circle if __name__ == "__main__" : unittest.main()
29.84058
95
0.610248
import random import unittest from collections import deque from os import path from ga4stpg.graph.graph import UndirectedWeightedGraph as Graph from ga4stpg.graph.reader import ReaderORLibrary from ga4stpg.graph.steiner import (prunning_mst, shortest_path, shortest_path_origin_prim, shortest_path_with_origin) class TestSTPGHeuristicas(unittest.TestCase): def setUp(self): reader = ReaderORLibrary() self.stpg_instance = reader.parser(path.join("datasets", "ORLibrary", "steinb13.txt")) self.graph = self.stpg_instance.graph self.terminals = list(self.stpg_instance.terminals) random.seed() def test_instance_reading(self): stpg = self.stpg_instance self.assertEqual(stpg.nro_edges, 125) self.assertEqual(stpg.nro_nodes, 100) self.assertEqual(stpg.nro_terminals, 17) self.assertEqual(stpg.nro_terminals, len(stpg.terminals)) self.assertEqual(stpg.nro_nodes, len(stpg.graph.vertices)) def test_shortest_path(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = shortest_path(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def test_shortest_path_with_origin(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = shortest_path_with_origin(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def test_shortest_path_origin_prim(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = shortest_path_origin_prim(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def test_prunning_mst(self): graph = self.graph stpg = self.stpg_instance terminal = random.choice(self.terminals) gg, cost = prunning_mst(graph, terminal, stpg.terminals) self.common_cases(gg, cost) def common_cases(self, steiner_tree : Graph, cost : int): self.assertIsInstance(steiner_tree, Graph) self.assertIsInstance(cost, int) self.assertGreater(cost,0) terminals = self.stpg_instance.terminals for k, v in degrees.items() : if v == 1 : is_terminal = 
(k in terminals) self.assertTrue(is_terminal) all_vertices = set(steiner_tree.vertices) self.assertIsInstance(all_vertices, set) self.assertEqual(len(all_vertices), len(steiner_tree.vertices)) ertFalse(tt) stpg = self.stpg_instance has_cycles = self.check_cycles_dfs(steiner_tree, self.terminals[8]) self.assertFalse(has_cycles) def check_cycles_dfs(self, graph, start): stack = deque() visited = set([start]) prev = dict() stack.append(start) has_circle = False while stack: v = stack.pop() visited.add(v) for u in graph.adjacent_to(v): if u not in visited : stack.append(u) prev[u] = v elif not prev[v] == u : has_circle = True return has_circle if __name__ == "__main__" : unittest.main()
true
true
1c358d00e0e7644e712c729d73ae6ea489028eb9
6,888
py
Python
ros/src/tl_detector/tl_detector.py
kingmbm1118/selfdriving9_Capstone
37e4613290df63e4ea55853bf93822da9e00b695
[ "MIT" ]
1
2020-08-04T01:59:44.000Z
2020-08-04T01:59:44.000Z
ros/src/tl_detector/tl_detector.py
kingmbm1118/selfdriving9_Capstone
37e4613290df63e4ea55853bf93822da9e00b695
[ "MIT" ]
12
2019-12-16T22:19:56.000Z
2022-03-12T00:06:22.000Z
ros/src/tl_detector/tl_detector.py
kingmbm1118/selfdriving9_Capstone
37e4613290df63e4ea55853bf93822da9e00b695
[ "MIT" ]
2
2019-11-27T18:53:10.000Z
2019-11-28T18:17:32.000Z
#!/usr/bin/env python import rospy import os from std_msgs.msg import Int32 from geometry_msgs.msg import PoseStamped, Pose from styx_msgs.msg import TrafficLightArray, TrafficLight from styx_msgs.msg import Lane from sensor_msgs.msg import Image from cv_bridge import CvBridge from light_classification.tl_classifier import TLClassifier import tf import cv2 import yaml from scipy.spatial import KDTree #import numpy as np STATE_COUNT_THRESHOLD = 1 # was 3 DIR_PATH = os.path.dirname(os.path.realpath(__file__)) class TLDetector(object): def __init__(self): rospy.init_node('tl_detector') # file = open("labels.csv","a") # file.write('picture,label\n') # file.close() self.pose = None self.waypoints = None self.camera_image = None self.waypoints_2d = None self.waypoint_tree = None # self.base_waypoints = None self.lights = [] sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb) sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb) ''' /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and helps you acquire an accurate ground truth data source for the traffic light classifier by sending the current color state of all traffic lights in the simulator. When testing on the vehicle, the color state will not be available. You'll need to rely on the position of the light and the camera image to predict it. 
''' sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb) sub6 = rospy.Subscriber('/image_color', Image, self.image_cb) config_string = rospy.get_param("/traffic_light_config") self.config = yaml.load(config_string) self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1) self.bridge = CvBridge() self.light_classifier = TLClassifier() self.listener = tf.TransformListener() self.state = TrafficLight.UNKNOWN self.last_state = TrafficLight.UNKNOWN self.last_wp = -1 self.state_count = 0 rospy.spin() def pose_cb(self, msg): self.pose = msg def waypoints_cb(self, waypoints): self.waypoints = waypoints #self.base_lane= waypoints if not self.waypoints_2d: self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints] self.waypoint_tree = KDTree(self.waypoints_2d) def traffic_cb(self, msg): self.lights = msg.lights def image_cb(self, msg): """Identifies red lights in the incoming camera image and publishes the index of the waypoint closest to the red light's stop line to /traffic_waypoint Args: msg (Image): image from car-mounted camera """ self.has_image = True self.camera_image = msg # Convert your ROS Image message to OpenCV2 cv2_img = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8") light_wp, state = self.process_traffic_lights() ''' Publish upcoming red lights at camera frequency. Each predicted state has to occur `STATE_COUNT_THRESHOLD` number of times till we start using it. Otherwise the previous stable state is used. 
''' if self.state != state: self.state_count = 0 #light_wp, state = self.process_traffic_lights() self.state = state elif self.state_count >= STATE_COUNT_THRESHOLD: self.last_state = self.state #light_wp, state = self.process_traffic_lights() light_wp = light_wp if state == TrafficLight.RED else -1 self.last_wp = light_wp self.upcoming_red_light_pub.publish(Int32(light_wp)) else: self.upcoming_red_light_pub.publish(Int32(self.last_wp)) self.state_count += 1 def get_closest_waypoint(self, x, y): """Identifies the closest path waypoint to the given position https://en.wikipedia.org/wiki/Closest_pair_of_points_problem Args: pose (Pose): position to match a waypoint to Returns: int: index of the closest waypoint in self.waypoints """ closest_idx = self.waypoint_tree.query([x, y], 1)[1] return closest_idx def get_light_state(self, light): """Determines the current color of the traffic light Args: light (TrafficLight): light to classify Returns: int: ID of traffic light color (specified in styx_msgs/TrafficLight) """ # return light.state if(not self.has_image): self.prev_light_loc = None return False cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8") # Get classification state = self.light_classifier.get_classification(cv_image) return state def process_traffic_lights(self): """Finds closest visible traffic light, if one exists, and determines its location and color Returns: int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists) int: ID of traffic light color (specified in styx_msgs/TrafficLight) """ closest_light = None line_wp_idx = None #light = None # List of positions that correspond to the line to stop in front of for a given intersection stop_line_positions = self.config['stop_line_positions'] if(self.pose): #car_position = self.get_closest_waypoint(self.pose.pose) get_closest_waypoint car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y) #TODO find the closest visible 
traffic light (if one exists) diff = 200 # len(self.waypoints.waypoints) for i, light in enumerate(self.lights): # Get stop line waypoint index line = stop_line_positions[i] temp_wp_idx = self.get_closest_waypoint(line[0], line[1]) # Find closest stop line waypoint index d = temp_wp_idx - car_wp_idx if d >= 0 and d < diff: diff = d closest_light = light line_wp_idx = temp_wp_idx if closest_light: state = self.get_light_state(closest_light) return line_wp_idx, state #self.waypoints = None return -1, TrafficLight.UNKNOWN if __name__ == '__main__': try: TLDetector() except rospy.ROSInterruptException: rospy.logerr('Could not start traffic node.')
37.434783
132
0.642857
import rospy import os from std_msgs.msg import Int32 from geometry_msgs.msg import PoseStamped, Pose from styx_msgs.msg import TrafficLightArray, TrafficLight from styx_msgs.msg import Lane from sensor_msgs.msg import Image from cv_bridge import CvBridge from light_classification.tl_classifier import TLClassifier import tf import cv2 import yaml from scipy.spatial import KDTree STATE_COUNT_THRESHOLD = 1 DIR_PATH = os.path.dirname(os.path.realpath(__file__)) class TLDetector(object): def __init__(self): rospy.init_node('tl_detector') self.pose = None self.waypoints = None self.camera_image = None self.waypoints_2d = None self.waypoint_tree = None self.lights = [] sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb) sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb) sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb) sub6 = rospy.Subscriber('/image_color', Image, self.image_cb) config_string = rospy.get_param("/traffic_light_config") self.config = yaml.load(config_string) self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1) self.bridge = CvBridge() self.light_classifier = TLClassifier() self.listener = tf.TransformListener() self.state = TrafficLight.UNKNOWN self.last_state = TrafficLight.UNKNOWN self.last_wp = -1 self.state_count = 0 rospy.spin() def pose_cb(self, msg): self.pose = msg def waypoints_cb(self, waypoints): self.waypoints = waypoints if not self.waypoints_2d: self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints] self.waypoint_tree = KDTree(self.waypoints_2d) def traffic_cb(self, msg): self.lights = msg.lights def image_cb(self, msg): self.has_image = True self.camera_image = msg cv2_img = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8") light_wp, state = self.process_traffic_lights() if self.state != state: self.state_count = 0 self.state = state elif self.state_count >= 
STATE_COUNT_THRESHOLD: self.last_state = self.state light_wp = light_wp if state == TrafficLight.RED else -1 self.last_wp = light_wp self.upcoming_red_light_pub.publish(Int32(light_wp)) else: self.upcoming_red_light_pub.publish(Int32(self.last_wp)) self.state_count += 1 def get_closest_waypoint(self, x, y): closest_idx = self.waypoint_tree.query([x, y], 1)[1] return closest_idx def get_light_state(self, light): if(not self.has_image): self.prev_light_loc = None return False cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8") state = self.light_classifier.get_classification(cv_image) return state def process_traffic_lights(self): closest_light = None line_wp_idx = None stop_line_positions = self.config['stop_line_positions'] if(self.pose): car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y) diff = 200 for i, light in enumerate(self.lights): line = stop_line_positions[i] temp_wp_idx = self.get_closest_waypoint(line[0], line[1]) d = temp_wp_idx - car_wp_idx if d >= 0 and d < diff: diff = d closest_light = light line_wp_idx = temp_wp_idx if closest_light: state = self.get_light_state(closest_light) return line_wp_idx, state return -1, TrafficLight.UNKNOWN if __name__ == '__main__': try: TLDetector() except rospy.ROSInterruptException: rospy.logerr('Could not start traffic node.')
true
true
1c358d5c277563ebf7ec4e31a4234472b1bb6da1
389
py
Python
week06/week06/wsgi.py
fuengfa/CS459
cf8b8dcdb94ebcb894551174e5223b857425e1f6
[ "BSD-2-Clause" ]
null
null
null
week06/week06/wsgi.py
fuengfa/CS459
cf8b8dcdb94ebcb894551174e5223b857425e1f6
[ "BSD-2-Clause" ]
null
null
null
week06/week06/wsgi.py
fuengfa/CS459
cf8b8dcdb94ebcb894551174e5223b857425e1f6
[ "BSD-2-Clause" ]
null
null
null
""" WSGI config for week06 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "week06.settings") application = get_wsgi_application()
22.882353
78
0.784062
import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "week06.settings") application = get_wsgi_application()
true
true
1c358d63343780b4bc592c3310d68823d9e3ed92
6,650
py
Python
Python/utils/trimesh.py
hpbader42/Klampt
89faaef942c0c6fca579a3770314c6610e2ac772
[ "BSD-3-Clause" ]
null
null
null
Python/utils/trimesh.py
hpbader42/Klampt
89faaef942c0c6fca579a3770314c6610e2ac772
[ "BSD-3-Clause" ]
null
null
null
Python/utils/trimesh.py
hpbader42/Klampt
89faaef942c0c6fca579a3770314c6610e2ac772
[ "BSD-3-Clause" ]
1
2019-07-01T08:48:32.000Z
2019-07-01T08:48:32.000Z
from klampt.vectorops import * import math #import numpy def rotationMatrix(x,y,z,rads): c = math.cos(rads) s = math.sin(rads) #cross = numpy.matrix([[0.0,-z,y],[z,0.0,-x],[-y,x,0.0]]) #return numpy.eye(3)+c*cross-s*cross*cross; val = [[c,0.0,0.0],[0.0,c,0.0],[0.0,0.0,c]] rrt = [[x*x,x*y,x*z],[y*x,y*y,y*z],[z*x,z*y,z*z]] cross = [[0.0,-z,y],[z,0.0,-x],[-y,x,0.0]] for i in range(3): for j in range(3): val[i][j] += (1.0-c)*rrt[i][j] + s*cross[i][j] return val def mulMat(mat,x): return (dot(mat[0],x),dot(mat[1],x),dot(mat[2],x)) def triangleNormal(a,b,c): ba = sub(b,a) ca = sub(c,a) n = cross(ba,ca) return div(n,norm(n)) def triangleArea(a,b,c): if len(a)!=len(b) or len(a)!=len(c): raise 'point dimensions not equal' if len(a)==3: ba=(b[0]-a[0],b[1]-a[1],b[2]-a[2]) ca=(c[0]-a[0],c[1]-a[1],c[2]-a[2]) n = cross(ba,ca) return norm(n)*0.5 elif len(a)==2: ba=(b[0]-a[0],b[1]-a[1],b[2]-a[2]) ca=(c[0]-a[0],c[1]-a[1],c[2]-a[2]) return cross(ba,ca)*0.5 else: raise 'Triangle must be in 2 or 3D' def triangleWeight(a,b,c): dA=distanceSquared(b,c) dB=distanceSquared(a,c) dC=distanceSquared(a,b) try: return (dA+dB+dC)/triangleArea(a,b,c) except ZeroDivisionError: return float('infinity') def outputTriangles(ind,i,k): j=ind[i][k] if j<0: return [] if j<=i or j>=k: raise 'invalid indices %d %d %d' % (i,j,k) return [(i,j,k)]+outputTriangles(ind,i,j)+outputTriangles(ind,j,k) def triangulateConvexOptimal(verts): if len(verts)<3: raise 'Face must have 3 or more points' cost=[] ind=[] for i in range(len(verts)): cost.append([0]*len(verts)) ind.append([-1]*len(verts)) for i in range(len(verts)-2): cost[i][i+2]=triangleWeight(verts[i],verts[i+2],verts[i+1]) ind[i][i+2]=i+1 for diag in range(3,len(verts)): for i in range(len(verts)-diag): k=i+diag alts=[(cost[i][j]+cost[j][k]+triangleWeight(verts[i],verts[j],verts[k]),j) for j in range(i+1,k)] (cost[i][k],ind[i][k])=min(alts) tris=[] res=outputTriangles(ind,0,len(verts)-1) return res def triangulateConvex(verts): if len(verts)<3: raise 'Face 
must have 3 or more points' n=len(verts) corner = 0 largestArea = 0 for i in range(n): leastArea = 1e100 for j in range(1,n-1): v1 = (i+j)%n v2 = (i+j+1)%n a = triangleArea(verts[i],verts[v1],verts[v2]) if a < leastArea: leastArea = a if leastArea > largestArea: largestArea = leastArea corner = i res = [] for i in range(1,n-1): v1 = (corner+i)%n v2 = (corner+i+1)%n res.append((corner,v1,v2)) return res class TriMesh: points = [] triangles = [] def __init__(self): self.points = [] self.triangles = [] def triangulate(self,pts,faces): tris=[] for f in faces: inds=triangulateConvex([pts[i] for i in f]) tris.extend([(f[i],f[j],f[k]) for (i,j,k) in inds]); return tris def triangulateOptimal(self,pts,faces): tris=[] for f in faces: inds=triangulateConvexOptimal([pts[i] for i in f]) tris.extend([(f[i],f[j],f[k]) for (i,j,k) in inds]); return tris def simpleTriangulate(self,pts,faces): tris=[] for f in faces: for i in range(len(f)-2): tris.append((f[0],f[i+1],f[i+2])) return tris def addTriangle(self,pts): if len(pts) != 3: raise 'must give a list of 3 points' offset = len(self.points) self.points = self.points + (pts) self.triangles.append((offset,offset+1,offset+2)) def addConvexPolygon(self,pts): inds=triangulateConvex(pts) offset = len(self.points) self.points = self.points + (pts) offsettris = [(a+offset,b+offset,c+offset) for (a,b,c) in inds] self.triangles = self.triangles + offsettris def addTriangles(self,pts,tris): offset = len(self.points) self.points = self.points + (pts) offsettris = [(a+offset,b+offset,c+offset) for (a,b,c) in tris] self.triangles = self.triangles + offsettris def flipYZ(self): for p in range(len(self.points)): (a,b,c) = self.points[p] self.points[p] = (a,-c,b) def flipZ(self): for p in range(len(self.points)): (a,b,c) = self.points[p] self.points[p] = (a,b,-c) def flipFaces(self): for t in range(len(self.triangles)): (a,b,c) = self.triangles[t] self.triangles[t] = (a,c,b) def rotate(self,x,y,z,rads): R = rotationMatrix(x,y,z,rads) for v in 
range(len(self.points)): self.points[v] = mulMat(R,self.points[v]) def translate(self,x,y,z): for v in range(len(self.points)): self.points[v] = add(self.points[v],(x,y,z)) def scale(self,s): for v in range(len(self.points)): self.points[v] = mul(self.points[v],s) def triangleNormal(self,tri): a = self.points[self.triangles[tri][0]] b = self.points[self.triangles[tri][1]] c = self.points[self.triangles[tri][2]] return triangleNormal(a,b,c) def load(self,fn): f = open(fn,'r') try: items = (' '.join(f.readlines())).split(); nv = int(items[0]) vtext = zip(items[1:1+nv*3:3],items[2:2+nv*3:3],items[3:3+nv*3:3]) self.points = [(float(i[0]),float(i[1]),float(i[2])) for i in vtext] items = items[1+nv*3:] nt = int(items[0]) ttext = zip(items[1:1+nt*3:3],items[2:2+nt*3:3],items[3:3+nt*3:3]) self.triangles = [(int(i[0]),int(i[1]),int(i[2])) for i in ttext] items = items[1+nt*3:] if len(items) != 0: print('Warning,',len(items),'words at end of file') except: raise IOError('Error loading tri mesh from '+fn) finally: f.close() def save(self,fn): f = open(fn,'w') f.write(str(len(self.points))+'\n') for a,b,c in self.points: f.write('%g %g %g\n'%(a,b,c)) f.write(str(len(self.triangles))+'\n') for a,b,c in self.triangles: f.write(str(a)+' '+str(b)+' '+str(c)+'\n') f.close()
31.367925
109
0.521203
from klampt.vectorops import * import math def rotationMatrix(x,y,z,rads): c = math.cos(rads) s = math.sin(rads) val = [[c,0.0,0.0],[0.0,c,0.0],[0.0,0.0,c]] rrt = [[x*x,x*y,x*z],[y*x,y*y,y*z],[z*x,z*y,z*z]] cross = [[0.0,-z,y],[z,0.0,-x],[-y,x,0.0]] for i in range(3): for j in range(3): val[i][j] += (1.0-c)*rrt[i][j] + s*cross[i][j] return val def mulMat(mat,x): return (dot(mat[0],x),dot(mat[1],x),dot(mat[2],x)) def triangleNormal(a,b,c): ba = sub(b,a) ca = sub(c,a) n = cross(ba,ca) return div(n,norm(n)) def triangleArea(a,b,c): if len(a)!=len(b) or len(a)!=len(c): raise 'point dimensions not equal' if len(a)==3: ba=(b[0]-a[0],b[1]-a[1],b[2]-a[2]) ca=(c[0]-a[0],c[1]-a[1],c[2]-a[2]) n = cross(ba,ca) return norm(n)*0.5 elif len(a)==2: ba=(b[0]-a[0],b[1]-a[1],b[2]-a[2]) ca=(c[0]-a[0],c[1]-a[1],c[2]-a[2]) return cross(ba,ca)*0.5 else: raise 'Triangle must be in 2 or 3D' def triangleWeight(a,b,c): dA=distanceSquared(b,c) dB=distanceSquared(a,c) dC=distanceSquared(a,b) try: return (dA+dB+dC)/triangleArea(a,b,c) except ZeroDivisionError: return float('infinity') def outputTriangles(ind,i,k): j=ind[i][k] if j<0: return [] if j<=i or j>=k: raise 'invalid indices %d %d %d' % (i,j,k) return [(i,j,k)]+outputTriangles(ind,i,j)+outputTriangles(ind,j,k) def triangulateConvexOptimal(verts): if len(verts)<3: raise 'Face must have 3 or more points' cost=[] ind=[] for i in range(len(verts)): cost.append([0]*len(verts)) ind.append([-1]*len(verts)) for i in range(len(verts)-2): cost[i][i+2]=triangleWeight(verts[i],verts[i+2],verts[i+1]) ind[i][i+2]=i+1 for diag in range(3,len(verts)): for i in range(len(verts)-diag): k=i+diag alts=[(cost[i][j]+cost[j][k]+triangleWeight(verts[i],verts[j],verts[k]),j) for j in range(i+1,k)] (cost[i][k],ind[i][k])=min(alts) tris=[] res=outputTriangles(ind,0,len(verts)-1) return res def triangulateConvex(verts): if len(verts)<3: raise 'Face must have 3 or more points' n=len(verts) corner = 0 largestArea = 0 for i in range(n): leastArea = 1e100 for j in 
range(1,n-1): v1 = (i+j)%n v2 = (i+j+1)%n a = triangleArea(verts[i],verts[v1],verts[v2]) if a < leastArea: leastArea = a if leastArea > largestArea: largestArea = leastArea corner = i res = [] for i in range(1,n-1): v1 = (corner+i)%n v2 = (corner+i+1)%n res.append((corner,v1,v2)) return res class TriMesh: points = [] triangles = [] def __init__(self): self.points = [] self.triangles = [] def triangulate(self,pts,faces): tris=[] for f in faces: inds=triangulateConvex([pts[i] for i in f]) tris.extend([(f[i],f[j],f[k]) for (i,j,k) in inds]); return tris def triangulateOptimal(self,pts,faces): tris=[] for f in faces: inds=triangulateConvexOptimal([pts[i] for i in f]) tris.extend([(f[i],f[j],f[k]) for (i,j,k) in inds]); return tris def simpleTriangulate(self,pts,faces): tris=[] for f in faces: for i in range(len(f)-2): tris.append((f[0],f[i+1],f[i+2])) return tris def addTriangle(self,pts): if len(pts) != 3: raise 'must give a list of 3 points' offset = len(self.points) self.points = self.points + (pts) self.triangles.append((offset,offset+1,offset+2)) def addConvexPolygon(self,pts): inds=triangulateConvex(pts) offset = len(self.points) self.points = self.points + (pts) offsettris = [(a+offset,b+offset,c+offset) for (a,b,c) in inds] self.triangles = self.triangles + offsettris def addTriangles(self,pts,tris): offset = len(self.points) self.points = self.points + (pts) offsettris = [(a+offset,b+offset,c+offset) for (a,b,c) in tris] self.triangles = self.triangles + offsettris def flipYZ(self): for p in range(len(self.points)): (a,b,c) = self.points[p] self.points[p] = (a,-c,b) def flipZ(self): for p in range(len(self.points)): (a,b,c) = self.points[p] self.points[p] = (a,b,-c) def flipFaces(self): for t in range(len(self.triangles)): (a,b,c) = self.triangles[t] self.triangles[t] = (a,c,b) def rotate(self,x,y,z,rads): R = rotationMatrix(x,y,z,rads) for v in range(len(self.points)): self.points[v] = mulMat(R,self.points[v]) def translate(self,x,y,z): for v in 
range(len(self.points)): self.points[v] = add(self.points[v],(x,y,z)) def scale(self,s): for v in range(len(self.points)): self.points[v] = mul(self.points[v],s) def triangleNormal(self,tri): a = self.points[self.triangles[tri][0]] b = self.points[self.triangles[tri][1]] c = self.points[self.triangles[tri][2]] return triangleNormal(a,b,c) def load(self,fn): f = open(fn,'r') try: items = (' '.join(f.readlines())).split(); nv = int(items[0]) vtext = zip(items[1:1+nv*3:3],items[2:2+nv*3:3],items[3:3+nv*3:3]) self.points = [(float(i[0]),float(i[1]),float(i[2])) for i in vtext] items = items[1+nv*3:] nt = int(items[0]) ttext = zip(items[1:1+nt*3:3],items[2:2+nt*3:3],items[3:3+nt*3:3]) self.triangles = [(int(i[0]),int(i[1]),int(i[2])) for i in ttext] items = items[1+nt*3:] if len(items) != 0: print('Warning,',len(items),'words at end of file') except: raise IOError('Error loading tri mesh from '+fn) finally: f.close() def save(self,fn): f = open(fn,'w') f.write(str(len(self.points))+'\n') for a,b,c in self.points: f.write('%g %g %g\n'%(a,b,c)) f.write(str(len(self.triangles))+'\n') for a,b,c in self.triangles: f.write(str(a)+' '+str(b)+' '+str(c)+'\n') f.close()
true
true
1c358e0ca080cd6799da1d56d8825177021aec6e
121
py
Python
platform/core/polyaxon/polyflow/apps.py
hackerwins/polyaxon
ff56a098283ca872abfbaae6ba8abba479ffa394
[ "Apache-2.0" ]
null
null
null
platform/core/polyaxon/polyflow/apps.py
hackerwins/polyaxon
ff56a098283ca872abfbaae6ba8abba479ffa394
[ "Apache-2.0" ]
null
null
null
platform/core/polyaxon/polyflow/apps.py
hackerwins/polyaxon
ff56a098283ca872abfbaae6ba8abba479ffa394
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig class PolyflowConfig(AppConfig): name = 'polyflow' verbose_name = 'Polyflow'
17.285714
33
0.735537
from django.apps import AppConfig class PolyflowConfig(AppConfig): name = 'polyflow' verbose_name = 'Polyflow'
true
true
1c358e4113ad9f6e9b8502c37297313a1c2eefbf
5,416
py
Python
octopus/tests/BTC/test_explorer.py
SillyTin/octopus
5ad30f4affaa209a6ccc03c406c41c820dab62dc
[ "MIT" ]
212
2018-06-29T10:13:42.000Z
2020-02-16T07:55:47.000Z
octopus/tests/BTC/test_explorer.py
b-mueller/octopus
249f2afa37859bf645bc0251502ac104c4b37ec7
[ "MIT" ]
12
2018-07-05T08:50:12.000Z
2020-01-16T08:41:49.000Z
octopus/tests/BTC/test_explorer.py
b-mueller/octopus
249f2afa37859bf645bc0251502ac104c4b37ec7
[ "MIT" ]
34
2018-06-28T11:49:02.000Z
2020-02-05T07:42:33.000Z
from octopus.platforms.BTC.explorer import BitcoinExplorerRPC from octopus.platforms.BTC.explorer import RPC_USER, RPC_PASSWORD, RPC_HOST import unittest class BitcoinExplorerTestCase(unittest.TestCase): explorer = BitcoinExplorerRPC(host=('%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST))) blockhash = '00000000000000000024fb37364cbf81fd49cc2d51c09c75c35433c3a1945d04' txid = '1b5bfc2681d40c872126919ccb1752de4cca42dcfc594899f2ef11db4b05bb39' tx_raw = '0200000001686b654b40737f0daa1532f64e525dc925e60d075403d38cfb12ac9097764015040000006a473044022009ec3f26984906a813faae05d968ec06bf1c68883e09a00b6333126ea87d96b302201cf1d2b9165442aa178fdf772a3909c3d2ba69e454eb8660fa35df8645e3bcb60121022f2caec3ad2f3b174d048a0d46f4f6e8ba4e9d02f6bdbba64ac6817f7ac6c131ffffffff02060d0700000000001976a91407c5acae3abc91735a1471e275e33abbffada89088ac00581300000000001976a91432f2e30111e1dc45f415430ef082cb64225c538a88ac00000000' wallet_address = '15wDxrRCn7YiCXdvqjcih6G8svrmq5AQSS' script_hex = "76a82096b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf88ac" script_asm = 'OP_DUP OP_SHA256 96b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf OP_EQUALVERIFY OP_CHECKSIG' def testRPCCommand(self): ####################### # HIGHT-LEVEL METHODS # ####################### self.assertEqual(self.explorer.get_transaction(self.txid, 0), self.tx_raw) self.assertEqual(len(self.explorer.get_block_by_hash(self.blockhash)), 18) self.assertEqual(len(self.explorer.get_block_by_number(500000)), 18) #################### # JSON-RPC METHODS # #################### self.assertEqual(self.explorer.decoderawtransaction(self.tx_raw)['txid'], self.txid) self.assertEqual(self.explorer.decodescript(self.script_hex)['asm'], self.script_asm) self.assertEqual(len(self.explorer.getbestblockhash()), len(self.blockhash)) self.assertEqual(len(self.explorer.getblock(self.blockhash)), 18) self.assertEqual(len(self.explorer.getblockchaininfo()), 11) self.assertEqual(type(self.explorer.getblockcount()), int) 
self.assertEqual(self.explorer.getblockhash(500000), self.blockhash) # self.assertEqual(len(self.explorer.getchaintips()), 2) self.assertEqual(type(self.explorer.getconnectioncount()), int) self.assertEqual(type(self.explorer.getdifficulty()), float) self.assertEqual(len(self.explorer.getinfo()), 16) self.assertEqual(len(self.explorer.getmempoolinfo()), 5) self.assertEqual(len(self.explorer.getmininginfo()), 8) self.assertEqual(len(self.explorer.getnettotals()), 4) self.assertEqual(type(self.explorer.getnetworkhashps()), float) self.assertEqual(len(self.explorer.getnetworkinfo()), 13) self.assertEqual(len(self.explorer.getpeerinfo()), 8) self.assertEqual(type(self.explorer.getrawmempool()), list) self.assertEqual(self.explorer.getrawtransaction(self.txid), self.tx_raw) self.assertEqual(type(self.explorer.getreceivedbyaccount('')), float) self.assertEqual(type(self.explorer.getreceivedbyaddress(self.wallet_address)), float) self.assertEqual(len(self.explorer.gettxout(self.txid, 0)), 5) self.assertEqual(len(self.explorer.gettxoutproof([self.txid])), 818) self.assertEqual(type(self.explorer.getunconfirmedbalance()), float) self.assertEqual(len(self.explorer.getwalletinfo()), 9) self.assertEqual(type(self.explorer.help()), str) self.assertEqual(len(self.explorer.validateaddress(self.wallet_address)), 6) self.assertEqual(self.explorer.verifytxoutproof(self.explorer.gettxoutproof([self.txid])), [self.txid]) # Not tested ''' self.explorer.abandontransaction() self.explorer.addmultisigaddress() self.explorer.addnode() self.explorer.createmultisig() self.explorer.createrawtransaction() self.explorer.dumpprivkey() self.explorer.encryptwallet() self.explorer.estimatefee() self.explorer.estimatepriority() self.explorer.getaccountaddress() self.explorer.getaccount() self.explorer.getaddednodeinfo() self.explorer.getaddressesbyaccount() self.explorer.getbalance() self.explorer.gettransaction() self.explorer.keypoolrefill() self.explorer.listaccounts() 
self.explorer.listaddressgroupings() self.explorer.listlockunspent() self.explorer.listreceivedbyaccount() self.explorer.listreceivedbyaddress() self.explorer.listtransactions() self.explorer.listunspent() self.explorer.lockunspent() self.explorer.prioritisetransaction() self.explorer.sendfrom() self.explorer.sendmany() self.explorer.sendrawtransaction() self.explorer.sendtoaddress() self.explorer.settxfee() self.explorer.signmessage() self.explorer.signrawtransaction() self.explorer.submitblock() self.explorer.verifymessage() self.explorer.walletlock() self.explorer.walletpassphrase() self.explorer.walletpassphrasechange() ''' if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(BitcoinExplorerTestCase) unittest.TextTestRunner(verbosity=2).run(suite)
51.09434
465
0.723227
from octopus.platforms.BTC.explorer import BitcoinExplorerRPC from octopus.platforms.BTC.explorer import RPC_USER, RPC_PASSWORD, RPC_HOST import unittest class BitcoinExplorerTestCase(unittest.TestCase): explorer = BitcoinExplorerRPC(host=('%s:%s@%s' % (RPC_USER, RPC_PASSWORD, RPC_HOST))) blockhash = '00000000000000000024fb37364cbf81fd49cc2d51c09c75c35433c3a1945d04' txid = '1b5bfc2681d40c872126919ccb1752de4cca42dcfc594899f2ef11db4b05bb39' tx_raw = '0200000001686b654b40737f0daa1532f64e525dc925e60d075403d38cfb12ac9097764015040000006a473044022009ec3f26984906a813faae05d968ec06bf1c68883e09a00b6333126ea87d96b302201cf1d2b9165442aa178fdf772a3909c3d2ba69e454eb8660fa35df8645e3bcb60121022f2caec3ad2f3b174d048a0d46f4f6e8ba4e9d02f6bdbba64ac6817f7ac6c131ffffffff02060d0700000000001976a91407c5acae3abc91735a1471e275e33abbffada89088ac00581300000000001976a91432f2e30111e1dc45f415430ef082cb64225c538a88ac00000000' wallet_address = '15wDxrRCn7YiCXdvqjcih6G8svrmq5AQSS' script_hex = "76a82096b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf88ac" script_asm = 'OP_DUP OP_SHA256 96b3fe1f4ec8fd076379267f72443bed81cc49c18a2913f7e1f0727f6f9f4fbf OP_EQUALVERIFY OP_CHECKSIG' def testRPCCommand(self): e(self.explorer.getconnectioncount()), int) self.assertEqual(type(self.explorer.getdifficulty()), float) self.assertEqual(len(self.explorer.getinfo()), 16) self.assertEqual(len(self.explorer.getmempoolinfo()), 5) self.assertEqual(len(self.explorer.getmininginfo()), 8) self.assertEqual(len(self.explorer.getnettotals()), 4) self.assertEqual(type(self.explorer.getnetworkhashps()), float) self.assertEqual(len(self.explorer.getnetworkinfo()), 13) self.assertEqual(len(self.explorer.getpeerinfo()), 8) self.assertEqual(type(self.explorer.getrawmempool()), list) self.assertEqual(self.explorer.getrawtransaction(self.txid), self.tx_raw) self.assertEqual(type(self.explorer.getreceivedbyaccount('')), float) self.assertEqual(type(self.explorer.getreceivedbyaddress(self.wallet_address)), float) 
self.assertEqual(len(self.explorer.gettxout(self.txid, 0)), 5) self.assertEqual(len(self.explorer.gettxoutproof([self.txid])), 818) self.assertEqual(type(self.explorer.getunconfirmedbalance()), float) self.assertEqual(len(self.explorer.getwalletinfo()), 9) self.assertEqual(type(self.explorer.help()), str) self.assertEqual(len(self.explorer.validateaddress(self.wallet_address)), 6) self.assertEqual(self.explorer.verifytxoutproof(self.explorer.gettxoutproof([self.txid])), [self.txid]) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(BitcoinExplorerTestCase) unittest.TextTestRunner(verbosity=2).run(suite)
true
true
1c35909cfc3500db75b7730f3679f74a40141835
2,119
py
Python
Hessian/models/c1.py
Bhaskers-Blu-Org1/model-sanitization
1eff7e9f35e4fd194ffc83a55e4f6688ca9bb5c3
[ "Apache-2.0" ]
15
2020-05-04T15:28:36.000Z
2021-12-31T02:34:12.000Z
Hessian/models/c1.py
Bhaskers-Blu-Org1/model-sanitization
1eff7e9f35e4fd194ffc83a55e4f6688ca9bb5c3
[ "Apache-2.0" ]
1
2020-05-14T04:56:22.000Z
2020-05-27T14:10:12.000Z
Hessian/models/c1.py
IBM/model-sanitization
1eff7e9f35e4fd194ffc83a55e4f6688ca9bb5c3
[ "Apache-2.0" ]
4
2020-06-29T15:18:57.000Z
2022-03-27T17:04:07.000Z
from __future__ import print_function import numpy as np import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from torch.autograd import Variable class c1_model(nn.Module): def __init__(self, num_classes=10): super(c1_model, self).__init__() self.conv1=nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=2)# 32x32x3 -> 32x32x64 self.bn1=nn.BatchNorm2d(64) self.conv2=nn.Conv2d(64, 64, kernel_size=5, stride =1, padding=2)# 16x16x64 -> 16x16x64 self.bn2=nn.BatchNorm2d(64) self.fc1= nn.Linear(64*8*8, 384) self.fc2=nn.Linear(384,192) self.fc3=nn.Linear(192,num_classes) def forward(self, x): x = F.max_pool2d(self.bn1(F.relu(self.conv1(x))),3,2,1) x = F.max_pool2d(self.bn2(F.relu(self.conv2(x))),3,2,1) x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x class c2_model(nn.Module): def __init__(self): super(c2_model, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3) self.bn1 = nn.BatchNorm2d(64) self.conv2 = nn.Conv2d(64, 64, kernel_size=3) self.bn2 = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(64,128, kernel_size=3) self.bn3 = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(128,128, kernel_size=3) self.bn4 = nn.BatchNorm2d(128) self.conv_drop = nn.Dropout2d() self.fc1 = nn.Linear(128*5*5, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = nn.Linear(256, 10) self.drop = nn.Dropout() def forward(self, x): x = self.bn1(F.relu(self.conv1(x))) x = F.max_pool2d(self.bn2(F.relu(self.conv2(x))) ,2) x = self.bn3(F.relu(self.conv3(x))) x = F.max_pool2d(self.bn4(F.relu(self.conv4(x))) ,2) #x = self.conv_drop(x) x = x.view(-1, 128*5*5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x
32.6
95
0.596508
from __future__ import print_function import numpy as np import argparse import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms from torch.autograd import Variable class c1_model(nn.Module): def __init__(self, num_classes=10): super(c1_model, self).__init__() self.conv1=nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=2) self.bn1=nn.BatchNorm2d(64) self.conv2=nn.Conv2d(64, 64, kernel_size=5, stride =1, padding=2) self.bn2=nn.BatchNorm2d(64) self.fc1= nn.Linear(64*8*8, 384) self.fc2=nn.Linear(384,192) self.fc3=nn.Linear(192,num_classes) def forward(self, x): x = F.max_pool2d(self.bn1(F.relu(self.conv1(x))),3,2,1) x = F.max_pool2d(self.bn2(F.relu(self.conv2(x))),3,2,1) x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x class c2_model(nn.Module): def __init__(self): super(c2_model, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3) self.bn1 = nn.BatchNorm2d(64) self.conv2 = nn.Conv2d(64, 64, kernel_size=3) self.bn2 = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(64,128, kernel_size=3) self.bn3 = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(128,128, kernel_size=3) self.bn4 = nn.BatchNorm2d(128) self.conv_drop = nn.Dropout2d() self.fc1 = nn.Linear(128*5*5, 256) self.fc2 = nn.Linear(256, 256) self.fc3 = nn.Linear(256, 10) self.drop = nn.Dropout() def forward(self, x): x = self.bn1(F.relu(self.conv1(x))) x = F.max_pool2d(self.bn2(F.relu(self.conv2(x))) ,2) x = self.bn3(F.relu(self.conv3(x))) x = F.max_pool2d(self.bn4(F.relu(self.conv4(x))) ,2) x = x.view(-1, 128*5*5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x
true
true
1c3591614ba26c792bb2d02a3e14a8f342bd3969
1,915
py
Python
test/functional/rpc_signmessage.py
BlockStamp/bstnew
f3775b14874a78c3fd811616752f2c07415b04ee
[ "MIT" ]
37
2018-11-29T13:13:40.000Z
2020-12-10T00:39:21.000Z
test/functional/rpc_signmessage.py
BlockStamp/bstnew
f3775b14874a78c3fd811616752f2c07415b04ee
[ "MIT" ]
6
2018-11-27T19:07:36.000Z
2019-07-13T06:28:39.000Z
test/functional/rpc_signmessage.py
BlockStamp/bstnew
f3775b14874a78c3fd811616752f2c07415b04ee
[ "MIT" ]
6
2018-05-28T07:58:46.000Z
2020-01-15T18:41:24.000Z
#!/usr/bin/env python3 # Copyright (c) 2016-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test RPC commands for signing and verifying messages.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class SignMessagesTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-addresstype=legacy"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): message = 'This is just a test message' self.log.info('test signing with priv_key') priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' expected_signature = 'IL7XeeAHgdYRw5lGF/PWozzPkhVa1AYJS/QWlGxYkzV1KMpPzAtPgdc9RPxCre1dycK7ujZav207ubnLH9fjCqs=' signature = self.nodes[0].signmessagewithprivkey(priv_key, message) assert_equal(expected_signature, signature) assert(self.nodes[0].verifymessage(address, signature, message)) self.log.info('test signing with an address with wallet') address = self.nodes[0].getnewaddress() signature = self.nodes[0].signmessage(address, message) assert(self.nodes[0].verifymessage(address, signature, message)) self.log.info('test verifying with another address should not work') other_address = self.nodes[0].getnewaddress() other_signature = self.nodes[0].signmessage(other_address, message) assert(not self.nodes[0].verifymessage(other_address, signature, message)) assert(not self.nodes[0].verifymessage(address, other_signature, message)) if __name__ == '__main__': SignMessagesTest().main()
44.534884
119
0.733681
from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class SignMessagesTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-addresstype=legacy"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): message = 'This is just a test message' self.log.info('test signing with priv_key') priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' expected_signature = 'IL7XeeAHgdYRw5lGF/PWozzPkhVa1AYJS/QWlGxYkzV1KMpPzAtPgdc9RPxCre1dycK7ujZav207ubnLH9fjCqs=' signature = self.nodes[0].signmessagewithprivkey(priv_key, message) assert_equal(expected_signature, signature) assert(self.nodes[0].verifymessage(address, signature, message)) self.log.info('test signing with an address with wallet') address = self.nodes[0].getnewaddress() signature = self.nodes[0].signmessage(address, message) assert(self.nodes[0].verifymessage(address, signature, message)) self.log.info('test verifying with another address should not work') other_address = self.nodes[0].getnewaddress() other_signature = self.nodes[0].signmessage(other_address, message) assert(not self.nodes[0].verifymessage(other_address, signature, message)) assert(not self.nodes[0].verifymessage(address, other_signature, message)) if __name__ == '__main__': SignMessagesTest().main()
true
true
1c359266bc1d9b1602ff3fa83127beb8c92ced5f
56,633
py
Python
plugin.video.youtube/resources/lib/youtube_plugin/youtube/client/youtube.py
akuala/REPO.KUALA
ea9a157025530d2ce8fa0d88431c46c5352e89d4
[ "Apache-2.0" ]
2
2018-11-02T19:55:30.000Z
2020-08-14T02:22:20.000Z
plugin.video.youtube/resources/lib/youtube_plugin/youtube/client/youtube.py
akuala/REPO.KUALA
ea9a157025530d2ce8fa0d88431c46c5352e89d4
[ "Apache-2.0" ]
null
null
null
plugin.video.youtube/resources/lib/youtube_plugin/youtube/client/youtube.py
akuala/REPO.KUALA
ea9a157025530d2ce8fa0d88431c46c5352e89d4
[ "Apache-2.0" ]
3
2019-12-17T20:47:00.000Z
2021-02-11T19:03:59.000Z
# -*- coding: utf-8 -*- """ Copyright (C) 2014-2016 bromix (plugin.video.youtube) Copyright (C) 2016-2018 plugin.video.youtube SPDX-License-Identifier: GPL-2.0-only See LICENSES/GPL-2.0-only for more information. """ import copy import json import re import threading import traceback import requests from .login_client import LoginClient from ..helper.video_info import VideoInfo from ..helper.utils import get_shelf_index_by_title from ...kodion import constants from ...kodion import Context from ...kodion.utils import datetime_parser _context = Context(plugin_id='plugin.video.youtube') class YouTube(LoginClient): def __init__(self, config=None, language='en-US', region='US', items_per_page=50, access_token='', access_token_tv=''): if config is None: config = {} LoginClient.__init__(self, config=config, language=language, region=region, access_token=access_token, access_token_tv=access_token_tv) self._max_results = items_per_page def get_max_results(self): return self._max_results def get_language(self): return self._language def get_region(self): return self._region @staticmethod def calculate_next_page_token(page, max_result): page -= 1 low = 'AEIMQUYcgkosw048' high = 'ABCDEFGHIJKLMNOP' len_low = len(low) len_high = len(high) position = page * max_result overflow_token = 'Q' if position >= 128: overflow_token_iteration = position // 128 overflow_token = '%sE' % high[overflow_token_iteration] low_iteration = position % len_low # at this position the iteration starts with 'I' again (after 'P') if position >= 256: multiplier = (position // 128) - 1 position -= 128 * multiplier high_iteration = (position // len_low) % len_high return 'C%s%s%sAA' % (high[high_iteration], low[low_iteration], overflow_token) def update_watch_history(self, video_id, url): headers = {'Host': 'www.youtube.com', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36', 'Accept': 
'image/webp,*/*;q=0.8', 'DNT': '1', 'Referer': 'https://www.youtube.com/tv', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'} params = {'noflv': '1', 'html5': '1', 'video_id': video_id, 'referrer': '', 'eurl': 'https://www.youtube.com/tv#/watch?v=%s' % video_id, 'skl': 'false', 'ns': 'yt', 'el': 'leanback', 'ps': 'leanback'} if self._access_token: params['access_token'] = self._access_token try: _ = requests.get(url, params=params, headers=headers, verify=self._verify, allow_redirects=True) except: _context.log_error('Failed to update watch history |%s|' % traceback.print_exc()) def get_video_streams(self, context, video_id=None, player_config=None, cookies=None): video_info = VideoInfo(context, access_token=self._access_token, language=self._language) video_streams = video_info.load_stream_infos(video_id, player_config, cookies) # update title for video_stream in video_streams: title = '%s (%s)' % (context.get_ui().bold(video_stream['title']), video_stream['container']) if 'audio' in video_stream and 'video' in video_stream: if video_stream['audio']['bitrate'] > 0 and video_stream['video']['encoding'] and \ video_stream['audio']['encoding']: title = '%s (%s; %s / %s@%d)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], video_stream['video']['encoding'], video_stream['audio']['encoding'], video_stream['audio']['bitrate']) elif video_stream['video']['encoding'] and video_stream['audio']['encoding']: title = '%s (%s; %s / %s)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], video_stream['video']['encoding'], video_stream['audio']['encoding']) elif 'audio' in video_stream and 'video' not in video_stream: if video_stream['audio']['encoding'] and video_stream['audio']['bitrate'] > 0: title = '%s (%s; %s@%d)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], video_stream['audio']['encoding'], video_stream['audio']['bitrate']) elif 'audio' in video_stream or 
'video' in video_stream: encoding = video_stream.get('audio', dict()).get('encoding') if not encoding: encoding = video_stream.get('video', dict()).get('encoding') if encoding: title = '%s (%s; %s)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], encoding) video_stream['title'] = title return video_streams def remove_playlist(self, playlist_id): params = {'id': playlist_id, 'mine': 'true'} return self.perform_v3_request(method='DELETE', path='playlists', params=params) def get_supported_languages(self, language=None): _language = language if not _language: _language = self._language _language = _language.replace('-', '_') params = {'part': 'snippet', 'hl': _language} return self.perform_v3_request(method='GET', path='i18nLanguages', params=params) def get_supported_regions(self, language=None): _language = language if not _language: _language = self._language _language = _language.replace('-', '_') params = {'part': 'snippet', 'hl': _language} return self.perform_v3_request(method='GET', path='i18nRegions', params=params) def rename_playlist(self, playlist_id, new_title, privacy_status='private'): params = {'part': 'snippet,id,status'} post_data = {'kind': 'youtube#playlist', 'id': playlist_id, 'snippet': {'title': new_title}, 'status': {'privacyStatus': privacy_status}} return self.perform_v3_request(method='PUT', path='playlists', params=params, post_data=post_data) def create_playlist(self, title, privacy_status='private'): params = {'part': 'snippet,status'} post_data = {'kind': 'youtube#playlist', 'snippet': {'title': title}, 'status': {'privacyStatus': privacy_status}} return self.perform_v3_request(method='POST', path='playlists', params=params, post_data=post_data) def get_video_rating(self, video_id): if isinstance(video_id, list): video_id = ','.join(video_id) params = {'id': video_id} return self.perform_v3_request(method='GET', path='videos/getRating', params=params) def rate_video(self, video_id, rating='like'): """ Rate a 
video :param video_id: if of the video :param rating: [like|dislike|none] :return: """ params = {'id': video_id, 'rating': rating} return self.perform_v3_request(method='POST', path='videos/rate', params=params) def add_video_to_playlist(self, playlist_id, video_id): params = {'part': 'snippet', 'mine': 'true'} post_data = {'kind': 'youtube#playlistItem', 'snippet': {'playlistId': playlist_id, 'resourceId': {'kind': 'youtube#video', 'videoId': video_id}}} return self.perform_v3_request(method='POST', path='playlistItems', params=params, post_data=post_data) # noinspection PyUnusedLocal def remove_video_from_playlist(self, playlist_id, playlist_item_id): params = {'id': playlist_item_id} return self.perform_v3_request(method='DELETE', path='playlistItems', params=params) def unsubscribe(self, subscription_id): params = {'id': subscription_id} return self.perform_v3_request(method='DELETE', path='subscriptions', params=params) def subscribe(self, channel_id): params = {'part': 'snippet'} post_data = {'kind': 'youtube#subscription', 'snippet': {'resourceId': {'kind': 'youtube#channel', 'channelId': channel_id}}} return self.perform_v3_request(method='POST', path='subscriptions', params=params, post_data=post_data) def get_subscription(self, channel_id, order='alphabetical', page_token=''): """ :param channel_id: [channel-id|'mine'] :param order: ['alphabetical'|'relevance'|'unread'] :param page_token: :return: """ params = {'part': 'snippet', 'maxResults': str(self._max_results), 'order': order} if channel_id == 'mine': params['mine'] = 'true' else: params['channelId'] = channel_id if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='subscriptions', params=params) def get_guide_category(self, guide_category_id, page_token=''): params = {'part': 'snippet,contentDetails,brandingSettings', 'maxResults': str(self._max_results), 'categoryId': guide_category_id, 'regionCode': self._region, 'hl': self._language} if page_token: 
params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='channels', params=params) def get_guide_categories(self, page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='guideCategories', params=params) def get_popular_videos(self, page_token=''): params = {'part': 'snippet,status', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language, 'chart': 'mostPopular'} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videos', params=params) def get_video_category(self, video_category_id, page_token=''): params = {'part': 'snippet,contentDetails,status', 'maxResults': str(self._max_results), 'videoCategoryId': video_category_id, 'chart': 'mostPopular', 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videos', params=params) def get_video_categories(self, page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videoCategories', params=params) def _get_recommendations_for_home(self): # YouTube has deprecated this API, so use history and related items to form # a recommended set. We cache aggressively because searches incur a high # quota cost of 100 on the YouTube API. # Note this is a first stab attempt and can be refined a lot more. cache = _context.get_data_cache() # Do we have a cached result? 
cache_home_key = 'get-activities-home' cached = cache.get_item(cache.ONE_HOUR * 4, cache_home_key) if cache_home_key in cached and cached[cache_home_key].get('items'): return cached[cache_home_key] # Fetch existing list of items, if any items = [] cache_items_key = 'get-activities-home-items' cached = cache.get_item(cache.ONE_WEEK * 2, cache_items_key) if cache_items_key in cached: items = cached[cache_items_key] # Fetch history and recommended items. Use threads for faster execution. def helper(video_id, responses): _context.log_debug( 'Method get_activities: doing expensive API fetch for related' 'items for video %s' % video_id ) di = self.get_related_videos(video_id, max_results=10) if 'items' in di: # Record for which video we fetched the items for item in di['items']: item['plugin_fetched_for'] = video_id responses.extend(di['items']) history = self.get_watch_history() result = { 'kind': 'youtube#activityListResponse', 'items': [] } if not history.get('items'): return result threads = [] candidates = [] already_fetched_for_video_ids = [item['plugin_fetched_for'] for item in items] history_items = [item for item in history['items'] if re.match(r'(?P<video_id>[\w-]{11})', item['id'])] # TODO: # It would be nice to make this 8 user configurable for item in history_items[:8]: video_id = item['id'] if video_id not in already_fetched_for_video_ids: thread = threading.Thread(target=helper, args=(video_id, candidates)) threads.append(thread) thread.start() for thread in threads: thread.join() # Prepend new candidates to items seen = [item['id']['videoId'] for item in items] for candidate in candidates: vid = candidate['id']['videoId'] if vid not in seen: seen.append(vid) candidate['plugin_created_date'] = datetime_parser.now().strftime('%Y-%m-%dT%H:%M:%SZ') items.insert(0, candidate) # Truncate items to keep it manageable, and cache items = items[:500] cache.set(cache_items_key, json.dumps(items)) # Build the result set items.sort( key=lambda a: 
datetime_parser.parse(a['plugin_created_date']), reverse=True ) sorted_items = [] counter = 0 channel_counts = {} while items: counter += 1 # Hard stop on iteration. Good enough for our purposes. if counter >= 1000: break # Reset channel counts on a new page if counter % 50 == 0: channel_counts = {} # Ensure a single channel isn't hogging the page item = items.pop() channel_id = item['snippet']['channelId'] channel_counts.setdefault(channel_id, 0) if channel_counts[channel_id] <= 3: # Use the item channel_counts[channel_id] = channel_counts[channel_id] + 1 item["page_number"] = counter // 50 sorted_items.append(item) else: # Move the item to the end of the list items.append(item) # Finally sort items per page by date for a better distribution now = datetime_parser.now() sorted_items.sort( key=lambda a: ( a['page_number'], datetime_parser.total_seconds( now - datetime_parser.parse(a['snippet']['publishedAt']) ) ), ) # Finalize result result['items'] = sorted_items """ # TODO: # Enable pagination result['pageInfo'] = { 'resultsPerPage': 50, 'totalResults': len(sorted_items) } """ # Update cache cache.set(cache_home_key, json.dumps(result)) # If there are no sorted_items we fall back to default API behaviour return result def get_activities(self, channel_id, page_token=''): params = {'part': 'snippet,contentDetails', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language} if channel_id == 'home': recommended = self._get_recommendations_for_home() if 'items' in recommended and recommended.get('items'): return recommended if channel_id == 'home': params['home'] = 'true' elif channel_id == 'mine': params['mine'] = 'true' else: params['channelId'] = channel_id if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='activities', params=params) def get_channel_sections(self, channel_id): params = {'part': 'snippet,contentDetails', 'regionCode': self._region, 'hl': self._language} if channel_id == 
'mine': params['mine'] = 'true' else: params['channelId'] = channel_id return self.perform_v3_request(method='GET', path='channelSections', params=params) def get_playlists_of_channel(self, channel_id, page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results)} if channel_id != 'mine': params['channelId'] = channel_id else: params['mine'] = 'true' if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='playlists', params=params) def get_playlist_item_id_of_video_id(self, playlist_id, video_id, page_token=''): old_max_results = self._max_results self._max_results = 50 json_data = self.get_playlist_items(playlist_id=playlist_id, page_token=page_token) self._max_results = old_max_results items = json_data.get('items', []) for item in items: playlist_item_id = item['id'] playlist_video_id = item.get('snippet', {}).get('resourceId', {}).get('videoId', '') if playlist_video_id and playlist_video_id == video_id: return playlist_item_id next_page_token = json_data.get('nextPageToken', '') if next_page_token: return self.get_playlist_item_id_of_video_id(playlist_id=playlist_id, video_id=video_id, page_token=next_page_token) return None def get_playlist_items(self, playlist_id, page_token='', max_results=None): # prepare params max_results = str(self._max_results) if max_results is None else str(max_results) params = {'part': 'snippet', 'maxResults': max_results, 'playlistId': playlist_id} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='playlistItems', params=params) def get_channel_by_username(self, username): """ Returns a collection of zero or more channel resources that match the request criteria. 
:param username: retrieve channel_id for username :return: """ params = {'part': 'id'} if username == 'mine': params.update({'mine': 'true'}) else: params.update({'forUsername': username}) return self.perform_v3_request(method='GET', path='channels', params=params) def get_channels(self, channel_id): """ Returns a collection of zero or more channel resources that match the request criteria. :param channel_id: list or comma-separated list of the YouTube channel ID(s) :return: """ if isinstance(channel_id, list): channel_id = ','.join(channel_id) params = {'part': 'snippet,contentDetails,brandingSettings'} if channel_id != 'mine': params['id'] = channel_id else: params['mine'] = 'true' return self.perform_v3_request(method='GET', path='channels', params=params) def get_disliked_videos(self, page_token=''): # prepare page token if not page_token: page_token = '' # prepare params params = {'part': 'snippet,status', 'myRating': 'dislike', 'maxResults': str(self._max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videos', params=params) def get_videos(self, video_id, live_details=False): """ Returns a list of videos that match the API request parameters :param video_id: list of video ids :param live_details: also retrieve liveStreamingDetails :return: """ if isinstance(video_id, list): video_id = ','.join(video_id) parts = ['snippet,contentDetails,status'] if live_details: parts.append(',liveStreamingDetails') params = {'part': ''.join(parts), 'id': video_id} return self.perform_v3_request(method='GET', path='videos', params=params) def get_playlists(self, playlist_id): if isinstance(playlist_id, list): playlist_id = ','.join(playlist_id) params = {'part': 'snippet,contentDetails', 'id': playlist_id} return self.perform_v3_request(method='GET', path='playlists', params=params) def get_live_events(self, event_type='live', order='relevance', page_token='', location=False): """ :param event_type: one of: 'live', 
'completed', 'upcoming' :param order: one of: 'date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount' :param page_token: :param location: bool, use geolocation :return: """ # prepare page token if not page_token: page_token = '' # prepare params params = {'part': 'snippet', 'type': 'video', 'order': order, 'eventType': event_type, 'regionCode': self._region, 'hl': self._language, 'relevanceLanguage': self._language, 'maxResults': str(self._max_results)} if location: location = _context.get_settings().get_location() if location: params['location'] = location params['locationRadius'] = _context.get_settings().get_location_radius() if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='search', params=params) def get_related_videos(self, video_id, page_token='', max_results=0): # prepare page token if not page_token: page_token = '' max_results = self._max_results if max_results <= 0 else max_results # prepare params params = {'relatedToVideoId': video_id, 'part': 'snippet', 'type': 'video', 'regionCode': self._region, 'hl': self._language, 'maxResults': str(max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='search', params=params) def get_parent_comments(self, video_id, page_token='', max_results=0): max_results = self._max_results if max_results <= 0 else max_results # prepare params params = {'part': 'snippet', 'videoId': video_id, 'order': 'relevance', 'textFormat': 'plainText', 'maxResults': str(max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='commentThreads', params=params, no_login=True) def get_child_comments(self, parent_id, page_token='', max_results=0): max_results = self._max_results if max_results <= 0 else max_results # prepare params params = {'part': 'snippet', 'parentId': parent_id, 'textFormat': 'plainText', 'maxResults': str(max_results)} if page_token: params['pageToken'] 
= page_token return self.perform_v3_request(method='GET', path='comments', params=params, no_login=True) def get_channel_videos(self, channel_id, page_token=''): """ Returns a collection of video search results for the specified channel_id """ params = {'part': 'snippet', 'hl': self._language, 'maxResults': str(self._max_results), 'type': 'video', 'safeSearch': 'none', 'order': 'date'} if channel_id == 'mine': params['forMine'] = 'true' else: params['channelId'] = channel_id if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='search', params=params) def search(self, q, search_type=None, event_type='', channel_id='', order='relevance', safe_search='moderate', page_token='', location=False): """ Returns a collection of search results that match the query parameters specified in the API request. By default, a search result set identifies matching video, channel, and playlist resources, but you can also configure queries to only retrieve a specific type of resource. 
:param q: :param search_type: acceptable values are: 'video' | 'channel' | 'playlist' :param event_type: 'live', 'completed', 'upcoming' :param channel_id: limit search to channel id :param order: one of: 'date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount' :param safe_search: one of: 'moderate', 'none', 'strict' :param page_token: can be '' :param location: bool, use geolocation :return: """ if search_type is None: search_type = ['video', 'channel', 'playlist'] # prepare search type if not search_type: search_type = '' if isinstance(search_type, list): search_type = ','.join(search_type) # prepare page token if not page_token: page_token = '' # prepare params params = {'q': q, 'part': 'snippet', 'regionCode': self._region, 'hl': self._language, 'relevanceLanguage': self._language, 'maxResults': str(self._max_results)} if event_type and event_type in ['live', 'upcoming', 'completed']: params['eventType'] = event_type if search_type: params['type'] = search_type if channel_id: params['channelId'] = channel_id if order: params['order'] = order if safe_search: params['safeSearch'] = safe_search if page_token: params['pageToken'] = page_token video_only_params = ['eventType', 'videoCaption', 'videoCategoryId', 'videoDefinition', 'videoDimension', 'videoDuration', 'videoEmbeddable', 'videoLicense', 'videoSyndicated', 'videoType', 'relatedToVideoId', 'forMine'] for key in video_only_params: if params.get(key) is not None: params['type'] = 'video' break if params['type'] == 'video' and location: location = _context.get_settings().get_location() if location: params['location'] = location params['locationRadius'] = _context.get_settings().get_location_radius() return self.perform_v3_request(method='GET', path='search', params=params) def get_my_subscriptions(self, page_token=None, offset=0): if not page_token: page_token = '' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result): _post_data = { 
'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEsubscriptions' } if _page_token: _post_data['continuation'] = _page_token _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get( 'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) if not _data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _item in _items: _item = _item.get('gridVideoRenderer', {}) if _item: _video_item = {'id': _item['videoId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if 'offset' in _result and _result['offset'] >= 100: _result['offset'] -= 100 if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del 
_result['offset'] return _result return _perform(_page_token=page_token, _offset=offset, _result=result) def get_purchases(self, page_token, offset): if not page_token: page_token = '' shelf_title = 'Purchases' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result, _shelf_index=None): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } } } if _page_token: _post_data['continuation'] = _page_token else: _post_data['browseId'] = 'FEmy_youtube' _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = {} if 'continuationContents' in _json_data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) elif 'contents' in _json_data: _contents = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}]) if _shelf_index is None: _shelf_index = get_shelf_index_by_title(_context, _json_data, shelf_title) if _shelf_index is not None: _data = _contents[_shelf_index].get('shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _listItem in _items: _item = _listItem.get('gridVideoRenderer', {}) if _item: _video_item = {'id': _item['videoId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) _item = _listItem.get('gridPlaylistRenderer', {}) if _item: play_next_page_token = '' while True: json_playlist_data = self.get_playlist_items(_item['playlistId'], 
page_token=play_next_page_token) _playListItems = json_playlist_data.get('items', {}) for _playListItem in _playListItems: _playListItem = _playListItem.get('snippet', {}) if _playListItem: _video_item = {'id': _playListItem.get('resourceId', {}).get('videoId', ''), 'title': _playListItem['title'], 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) play_next_page_token = json_playlist_data.get('nextPageToken', '') if not play_next_page_token or _context.abort_requested(): break _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result, _shelf_index=shelf_index) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result shelf_index = None if self._language != 'en' and not self._language.startswith('en-') and not page_token: # shelf index is a moving target, make a request in english first to find the correct index by title _en_post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': 'US', 'acceptLanguage': 'en-US' }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEmy_youtube' } json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_en_post_data) shelf_index = get_shelf_index_by_title(_context, json_data, shelf_title) result = _perform(_page_token=page_token, _offset=offset, 
_result=result, _shelf_index=shelf_index) return result def get_saved_playlists(self, page_token, offset): if not page_token: page_token = '' shelf_title = 'Saved Playlists' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result, _shelf_index=None): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } } } if _page_token: _post_data['continuation'] = _page_token else: _post_data['browseId'] = 'FEmy_youtube' _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = {} if 'continuationContents' in _json_data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) elif 'contents' in _json_data: _contents = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}]) if _shelf_index is None: _shelf_index = get_shelf_index_by_title(_context, _json_data, shelf_title) if _shelf_index is not None: _data = _contents[_shelf_index].get('shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _item in _items: _item = _item.get('gridPlaylistRenderer', {}) if _item: _video_item = {'id': _item['playlistId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', ''), 'channel_id': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('navigationEndpoint', {}).get('browseEndpoint', {}).get('browseId', ''), 'thumbnails': {'default': {'url': ''}, 'medium': {'url': ''}, 'high': {'url': ''}}} _thumbs = 
_item.get('thumbnail', {}).get('thumbnails', [{}]) for _thumb in _thumbs: _thumb_url = _thumb.get('url', '') if _thumb_url.startswith('//'): _thumb_url = ''.join(['https:', _thumb_url]) if _thumb_url.endswith('/default.jpg'): _video_item['thumbnails']['default']['url'] = _thumb_url elif _thumb_url.endswith('/mqdefault.jpg'): _video_item['thumbnails']['medium']['url'] = _thumb_url elif _thumb_url.endswith('/hqdefault.jpg'): _video_item['thumbnails']['high']['url'] = _thumb_url _result['items'].append(_video_item) _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result, _shelf_index=_shelf_index) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result shelf_index = None if self._language != 'en' and not self._language.startswith('en-') and not page_token: # shelf index is a moving target, make a request in english first to find the correct index by title _en_post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': 'US', 'acceptLanguage': 'en-US' }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEmy_youtube' } json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_en_post_data) shelf_index = get_shelf_index_by_title(_context, json_data, shelf_title) result = _perform(_page_token=page_token, _offset=offset, _result=result, _shelf_index=shelf_index) 
return result def clear_watch_history(self): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } } } _json_data = self.perform_v1_tv_request(method='POST', path='history/clear_watch_history', post_data=_post_data) return _json_data def get_watch_history(self, page_token=None, offset=0): if not page_token: page_token = '' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEhistory' } if _page_token: _post_data['continuation'] = _page_token _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get( 'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) if not _data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _item in _items: _item = _item.get('gridVideoRenderer', {}) if _item: _video_item = {'id': _item['videoId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and 
len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result return _perform(_page_token=page_token, _offset=offset, _result=result) def get_watch_later_id(self): watch_later_id = '' def _get_items(_continuation=None): post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': 'US', 'acceptLanguage': 'en-US' }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'default' } if _continuation: post_data['continuation'] = _continuation return self.perform_v1_tv_request(method='POST', path='browse', post_data=post_data) current_page = 1 pages = 30 # 28 seems to be page limit, add a couple page padding, loop will break when there is no next page data progress_dialog = _context.get_ui().create_progress_dialog(_context.get_name(), _context.localize(constants.localize.COMMON_PLEASE_WAIT), background=True) progress_dialog.set_total(pages) progress_dialog.update(steps=1, text=_context.localize(constants.localize.WATCH_LATER_RETRIEVAL_PAGE) % str(current_page)) try: json_data = _get_items() while current_page < pages: contents = json_data.get('contents', json_data.get('continuationContents', {})) section = contents.get('sectionListRenderer', contents.get('sectionListContinuation', {})) contents = section.get('contents', [{}]) for shelf in contents: renderer = shelf.get('shelfRenderer', {}) endpoint = renderer.get('endpoint', {}) browse_endpoint = 
endpoint.get('browseEndpoint', {}) browse_id = browse_endpoint.get('browseId', '') title = renderer.get('title', {}) title_runs = title.get('runs', [{}])[0] title_text = title_runs.get('text', '') if (title_text.lower() == 'watch later') and (browse_id.startswith('VLPL') or browse_id.startswith('PL')): watch_later_id = browse_id.lstrip('VL') break if watch_later_id: break continuations = section.get('continuations', [{}])[0] next_continuation_data = continuations.get('nextContinuationData', {}) continuation = next_continuation_data.get('continuation', '') if continuation: current_page += 1 progress_dialog.update(steps=1, text=_context.localize(constants.localize.WATCH_LATER_RETRIEVAL_PAGE) % str(current_page)) json_data = _get_items(continuation) continue else: break finally: progress_dialog.close() return watch_later_id def perform_v3_request(self, method='GET', headers=None, path=None, post_data=None, params=None, allow_redirects=True, no_login=False): yt_config = self._config if not yt_config.get('key'): return { 'error': { 'errors': [{'reason': 'accessNotConfigured'}], 'message': 'No API keys provided' } } # params if not params: params = {} _params = {'key': yt_config['key']} _params.update(params) # headers if not headers: headers = {} _headers = {'Host': 'www.googleapis.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36', 'Accept-Encoding': 'gzip, deflate'} # a config can decide if a token is allowed if self._access_token and yt_config.get('token-allowed', True) and not no_login: _headers['Authorization'] = 'Bearer %s' % self._access_token _headers.update(headers) # url _url = 'https://www.googleapis.com/youtube/v3/%s' % path.strip('/') result = None log_params = copy.deepcopy(params) if 'location' in log_params: log_params['location'] = 'xx.xxxx,xx.xxxx' _context.log_debug('[data] v3 request: |{0}| path: |{1}| params: |{2}| post_data: |{3}|'.format(method, path, log_params, 
post_data)) if method == 'GET': result = requests.get(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'POST': _headers['content-type'] = 'application/json' result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'PUT': _headers['content-type'] = 'application/json' result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'DELETE': result = requests.delete(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) if result is None: return {} if result.headers.get('content-type', '').startswith('application/json'): return result.json() def perform_v1_tv_request(self, method='GET', headers=None, path=None, post_data=None, params=None, allow_redirects=True): # params if not params: params = {} _params = {'key': self._config_tv['key']} _params.update(params) # headers if not headers: headers = {} _headers = {'Host': 'www.googleapis.com', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36', 'Origin': 'https://www.youtube.com', 'Accept': '*/*', 'DNT': '1', 'Referer': 'https://www.youtube.com/tv', 'Accept-Encoding': 'gzip', 'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'} if self._access_token_tv: _headers['Authorization'] = 'Bearer %s' % self._access_token_tv _headers.update(headers) # url _url = 'https://www.googleapis.com/youtubei/v1/%s' % path.strip('/') result = None _context.log_debug('[i] v1 request: |{0}| path: |{1}| params: |{2}| post_data: |{3}|'.format(method, path, params, post_data)) if method == 'GET': result = requests.get(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'POST': _headers['content-type'] = 'application/json' result 
= requests.post(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'PUT': _headers['content-type'] = 'application/json' result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'DELETE': result = requests.delete(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) if result is None: return {} if result.headers.get('content-type', '').startswith('application/json'): return result.json()
42.10632
179
0.533505
import copy import json import re import threading import traceback import requests from .login_client import LoginClient from ..helper.video_info import VideoInfo from ..helper.utils import get_shelf_index_by_title from ...kodion import constants from ...kodion import Context from ...kodion.utils import datetime_parser _context = Context(plugin_id='plugin.video.youtube') class YouTube(LoginClient): def __init__(self, config=None, language='en-US', region='US', items_per_page=50, access_token='', access_token_tv=''): if config is None: config = {} LoginClient.__init__(self, config=config, language=language, region=region, access_token=access_token, access_token_tv=access_token_tv) self._max_results = items_per_page def get_max_results(self): return self._max_results def get_language(self): return self._language def get_region(self): return self._region @staticmethod def calculate_next_page_token(page, max_result): page -= 1 low = 'AEIMQUYcgkosw048' high = 'ABCDEFGHIJKLMNOP' len_low = len(low) len_high = len(high) position = page * max_result overflow_token = 'Q' if position >= 128: overflow_token_iteration = position // 128 overflow_token = '%sE' % high[overflow_token_iteration] low_iteration = position % len_low if position >= 256: multiplier = (position // 128) - 1 position -= 128 * multiplier high_iteration = (position // len_low) % len_high return 'C%s%s%sAA' % (high[high_iteration], low[low_iteration], overflow_token) def update_watch_history(self, video_id, url): headers = {'Host': 'www.youtube.com', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36', 'Accept': 'image/webp,*/*;q=0.8', 'DNT': '1', 'Referer': 'https://www.youtube.com/tv', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'} params = {'noflv': '1', 'html5': '1', 'video_id': video_id, 'referrer': '', 'eurl': 'https://www.youtube.com/tv#/watch?v=%s' % video_id, 'skl': 
'false', 'ns': 'yt', 'el': 'leanback', 'ps': 'leanback'} if self._access_token: params['access_token'] = self._access_token try: _ = requests.get(url, params=params, headers=headers, verify=self._verify, allow_redirects=True) except: _context.log_error('Failed to update watch history |%s|' % traceback.print_exc()) def get_video_streams(self, context, video_id=None, player_config=None, cookies=None): video_info = VideoInfo(context, access_token=self._access_token, language=self._language) video_streams = video_info.load_stream_infos(video_id, player_config, cookies) for video_stream in video_streams: title = '%s (%s)' % (context.get_ui().bold(video_stream['title']), video_stream['container']) if 'audio' in video_stream and 'video' in video_stream: if video_stream['audio']['bitrate'] > 0 and video_stream['video']['encoding'] and \ video_stream['audio']['encoding']: title = '%s (%s; %s / %s@%d)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], video_stream['video']['encoding'], video_stream['audio']['encoding'], video_stream['audio']['bitrate']) elif video_stream['video']['encoding'] and video_stream['audio']['encoding']: title = '%s (%s; %s / %s)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], video_stream['video']['encoding'], video_stream['audio']['encoding']) elif 'audio' in video_stream and 'video' not in video_stream: if video_stream['audio']['encoding'] and video_stream['audio']['bitrate'] > 0: title = '%s (%s; %s@%d)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], video_stream['audio']['encoding'], video_stream['audio']['bitrate']) elif 'audio' in video_stream or 'video' in video_stream: encoding = video_stream.get('audio', dict()).get('encoding') if not encoding: encoding = video_stream.get('video', dict()).get('encoding') if encoding: title = '%s (%s; %s)' % (context.get_ui().bold(video_stream['title']), video_stream['container'], encoding) video_stream['title'] = title 
return video_streams def remove_playlist(self, playlist_id): params = {'id': playlist_id, 'mine': 'true'} return self.perform_v3_request(method='DELETE', path='playlists', params=params) def get_supported_languages(self, language=None): _language = language if not _language: _language = self._language _language = _language.replace('-', '_') params = {'part': 'snippet', 'hl': _language} return self.perform_v3_request(method='GET', path='i18nLanguages', params=params) def get_supported_regions(self, language=None): _language = language if not _language: _language = self._language _language = _language.replace('-', '_') params = {'part': 'snippet', 'hl': _language} return self.perform_v3_request(method='GET', path='i18nRegions', params=params) def rename_playlist(self, playlist_id, new_title, privacy_status='private'): params = {'part': 'snippet,id,status'} post_data = {'kind': 'youtube#playlist', 'id': playlist_id, 'snippet': {'title': new_title}, 'status': {'privacyStatus': privacy_status}} return self.perform_v3_request(method='PUT', path='playlists', params=params, post_data=post_data) def create_playlist(self, title, privacy_status='private'): params = {'part': 'snippet,status'} post_data = {'kind': 'youtube#playlist', 'snippet': {'title': title}, 'status': {'privacyStatus': privacy_status}} return self.perform_v3_request(method='POST', path='playlists', params=params, post_data=post_data) def get_video_rating(self, video_id): if isinstance(video_id, list): video_id = ','.join(video_id) params = {'id': video_id} return self.perform_v3_request(method='GET', path='videos/getRating', params=params) def rate_video(self, video_id, rating='like'): params = {'id': video_id, 'rating': rating} return self.perform_v3_request(method='POST', path='videos/rate', params=params) def add_video_to_playlist(self, playlist_id, video_id): params = {'part': 'snippet', 'mine': 'true'} post_data = {'kind': 'youtube#playlistItem', 'snippet': {'playlistId': playlist_id, 'resourceId': 
{'kind': 'youtube#video', 'videoId': video_id}}} return self.perform_v3_request(method='POST', path='playlistItems', params=params, post_data=post_data) def remove_video_from_playlist(self, playlist_id, playlist_item_id): params = {'id': playlist_item_id} return self.perform_v3_request(method='DELETE', path='playlistItems', params=params) def unsubscribe(self, subscription_id): params = {'id': subscription_id} return self.perform_v3_request(method='DELETE', path='subscriptions', params=params) def subscribe(self, channel_id): params = {'part': 'snippet'} post_data = {'kind': 'youtube#subscription', 'snippet': {'resourceId': {'kind': 'youtube#channel', 'channelId': channel_id}}} return self.perform_v3_request(method='POST', path='subscriptions', params=params, post_data=post_data) def get_subscription(self, channel_id, order='alphabetical', page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results), 'order': order} if channel_id == 'mine': params['mine'] = 'true' else: params['channelId'] = channel_id if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='subscriptions', params=params) def get_guide_category(self, guide_category_id, page_token=''): params = {'part': 'snippet,contentDetails,brandingSettings', 'maxResults': str(self._max_results), 'categoryId': guide_category_id, 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='channels', params=params) def get_guide_categories(self, page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='guideCategories', params=params) def get_popular_videos(self, page_token=''): params = {'part': 'snippet,status', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': 
self._language, 'chart': 'mostPopular'} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videos', params=params) def get_video_category(self, video_category_id, page_token=''): params = {'part': 'snippet,contentDetails,status', 'maxResults': str(self._max_results), 'videoCategoryId': video_category_id, 'chart': 'mostPopular', 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videos', params=params) def get_video_categories(self, page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videoCategories', params=params) def _get_recommendations_for_home(self): cache = _context.get_data_cache() cache_home_key = 'get-activities-home' cached = cache.get_item(cache.ONE_HOUR * 4, cache_home_key) if cache_home_key in cached and cached[cache_home_key].get('items'): return cached[cache_home_key] items = [] cache_items_key = 'get-activities-home-items' cached = cache.get_item(cache.ONE_WEEK * 2, cache_items_key) if cache_items_key in cached: items = cached[cache_items_key] def helper(video_id, responses): _context.log_debug( 'Method get_activities: doing expensive API fetch for related' 'items for video %s' % video_id ) di = self.get_related_videos(video_id, max_results=10) if 'items' in di: for item in di['items']: item['plugin_fetched_for'] = video_id responses.extend(di['items']) history = self.get_watch_history() result = { 'kind': 'youtube#activityListResponse', 'items': [] } if not history.get('items'): return result threads = [] candidates = [] already_fetched_for_video_ids = [item['plugin_fetched_for'] for item in items] history_items = [item for item in history['items'] if re.match(r'(?P<video_id>[\w-]{11})', item['id'])] for item in 
history_items[:8]: video_id = item['id'] if video_id not in already_fetched_for_video_ids: thread = threading.Thread(target=helper, args=(video_id, candidates)) threads.append(thread) thread.start() for thread in threads: thread.join() seen = [item['id']['videoId'] for item in items] for candidate in candidates: vid = candidate['id']['videoId'] if vid not in seen: seen.append(vid) candidate['plugin_created_date'] = datetime_parser.now().strftime('%Y-%m-%dT%H:%M:%SZ') items.insert(0, candidate) items = items[:500] cache.set(cache_items_key, json.dumps(items)) items.sort( key=lambda a: datetime_parser.parse(a['plugin_created_date']), reverse=True ) sorted_items = [] counter = 0 channel_counts = {} while items: counter += 1 if counter >= 1000: break if counter % 50 == 0: channel_counts = {} item = items.pop() channel_id = item['snippet']['channelId'] channel_counts.setdefault(channel_id, 0) if channel_counts[channel_id] <= 3: # Use the item channel_counts[channel_id] = channel_counts[channel_id] + 1 item["page_number"] = counter // 50 sorted_items.append(item) else: # Move the item to the end of the list items.append(item) # Finally sort items per page by date for a better distribution now = datetime_parser.now() sorted_items.sort( key=lambda a: ( a['page_number'], datetime_parser.total_seconds( now - datetime_parser.parse(a['snippet']['publishedAt']) ) ), ) # Finalize result result['items'] = sorted_items # Update cache cache.set(cache_home_key, json.dumps(result)) # If there are no sorted_items we fall back to default API behaviour return result def get_activities(self, channel_id, page_token=''): params = {'part': 'snippet,contentDetails', 'maxResults': str(self._max_results), 'regionCode': self._region, 'hl': self._language} if channel_id == 'home': recommended = self._get_recommendations_for_home() if 'items' in recommended and recommended.get('items'): return recommended if channel_id == 'home': params['home'] = 'true' elif channel_id == 'mine': params['mine'] = 
'true' else: params['channelId'] = channel_id if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='activities', params=params) def get_channel_sections(self, channel_id): params = {'part': 'snippet,contentDetails', 'regionCode': self._region, 'hl': self._language} if channel_id == 'mine': params['mine'] = 'true' else: params['channelId'] = channel_id return self.perform_v3_request(method='GET', path='channelSections', params=params) def get_playlists_of_channel(self, channel_id, page_token=''): params = {'part': 'snippet', 'maxResults': str(self._max_results)} if channel_id != 'mine': params['channelId'] = channel_id else: params['mine'] = 'true' if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='playlists', params=params) def get_playlist_item_id_of_video_id(self, playlist_id, video_id, page_token=''): old_max_results = self._max_results self._max_results = 50 json_data = self.get_playlist_items(playlist_id=playlist_id, page_token=page_token) self._max_results = old_max_results items = json_data.get('items', []) for item in items: playlist_item_id = item['id'] playlist_video_id = item.get('snippet', {}).get('resourceId', {}).get('videoId', '') if playlist_video_id and playlist_video_id == video_id: return playlist_item_id next_page_token = json_data.get('nextPageToken', '') if next_page_token: return self.get_playlist_item_id_of_video_id(playlist_id=playlist_id, video_id=video_id, page_token=next_page_token) return None def get_playlist_items(self, playlist_id, page_token='', max_results=None): # prepare params max_results = str(self._max_results) if max_results is None else str(max_results) params = {'part': 'snippet', 'maxResults': max_results, 'playlistId': playlist_id} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='playlistItems', params=params) def get_channel_by_username(self, username): params = {'part': 'id'} if 
username == 'mine': params.update({'mine': 'true'}) else: params.update({'forUsername': username}) return self.perform_v3_request(method='GET', path='channels', params=params) def get_channels(self, channel_id): if isinstance(channel_id, list): channel_id = ','.join(channel_id) params = {'part': 'snippet,contentDetails,brandingSettings'} if channel_id != 'mine': params['id'] = channel_id else: params['mine'] = 'true' return self.perform_v3_request(method='GET', path='channels', params=params) def get_disliked_videos(self, page_token=''): # prepare page token if not page_token: page_token = '' # prepare params params = {'part': 'snippet,status', 'myRating': 'dislike', 'maxResults': str(self._max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='videos', params=params) def get_videos(self, video_id, live_details=False): if isinstance(video_id, list): video_id = ','.join(video_id) parts = ['snippet,contentDetails,status'] if live_details: parts.append(',liveStreamingDetails') params = {'part': ''.join(parts), 'id': video_id} return self.perform_v3_request(method='GET', path='videos', params=params) def get_playlists(self, playlist_id): if isinstance(playlist_id, list): playlist_id = ','.join(playlist_id) params = {'part': 'snippet,contentDetails', 'id': playlist_id} return self.perform_v3_request(method='GET', path='playlists', params=params) def get_live_events(self, event_type='live', order='relevance', page_token='', location=False): # prepare page token if not page_token: page_token = '' # prepare params params = {'part': 'snippet', 'type': 'video', 'order': order, 'eventType': event_type, 'regionCode': self._region, 'hl': self._language, 'relevanceLanguage': self._language, 'maxResults': str(self._max_results)} if location: location = _context.get_settings().get_location() if location: params['location'] = location params['locationRadius'] = _context.get_settings().get_location_radius() if page_token: 
params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='search', params=params) def get_related_videos(self, video_id, page_token='', max_results=0): # prepare page token if not page_token: page_token = '' max_results = self._max_results if max_results <= 0 else max_results # prepare params params = {'relatedToVideoId': video_id, 'part': 'snippet', 'type': 'video', 'regionCode': self._region, 'hl': self._language, 'maxResults': str(max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='search', params=params) def get_parent_comments(self, video_id, page_token='', max_results=0): max_results = self._max_results if max_results <= 0 else max_results # prepare params params = {'part': 'snippet', 'videoId': video_id, 'order': 'relevance', 'textFormat': 'plainText', 'maxResults': str(max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='commentThreads', params=params, no_login=True) def get_child_comments(self, parent_id, page_token='', max_results=0): max_results = self._max_results if max_results <= 0 else max_results # prepare params params = {'part': 'snippet', 'parentId': parent_id, 'textFormat': 'plainText', 'maxResults': str(max_results)} if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='comments', params=params, no_login=True) def get_channel_videos(self, channel_id, page_token=''): params = {'part': 'snippet', 'hl': self._language, 'maxResults': str(self._max_results), 'type': 'video', 'safeSearch': 'none', 'order': 'date'} if channel_id == 'mine': params['forMine'] = 'true' else: params['channelId'] = channel_id if page_token: params['pageToken'] = page_token return self.perform_v3_request(method='GET', path='search', params=params) def search(self, q, search_type=None, event_type='', channel_id='', order='relevance', safe_search='moderate', page_token='', 
location=False): if search_type is None: search_type = ['video', 'channel', 'playlist'] # prepare search type if not search_type: search_type = '' if isinstance(search_type, list): search_type = ','.join(search_type) # prepare page token if not page_token: page_token = '' # prepare params params = {'q': q, 'part': 'snippet', 'regionCode': self._region, 'hl': self._language, 'relevanceLanguage': self._language, 'maxResults': str(self._max_results)} if event_type and event_type in ['live', 'upcoming', 'completed']: params['eventType'] = event_type if search_type: params['type'] = search_type if channel_id: params['channelId'] = channel_id if order: params['order'] = order if safe_search: params['safeSearch'] = safe_search if page_token: params['pageToken'] = page_token video_only_params = ['eventType', 'videoCaption', 'videoCategoryId', 'videoDefinition', 'videoDimension', 'videoDuration', 'videoEmbeddable', 'videoLicense', 'videoSyndicated', 'videoType', 'relatedToVideoId', 'forMine'] for key in video_only_params: if params.get(key) is not None: params['type'] = 'video' break if params['type'] == 'video' and location: location = _context.get_settings().get_location() if location: params['location'] = location params['locationRadius'] = _context.get_settings().get_location_radius() return self.perform_v3_request(method='GET', path='search', params=params) def get_my_subscriptions(self, page_token=None, offset=0): if not page_token: page_token = '' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEsubscriptions' } if _page_token: _post_data['continuation'] = _page_token _json_data = self.perform_v1_tv_request(method='POST', 
path='browse', post_data=_post_data) _data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get( 'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) if not _data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _item in _items: _item = _item.get('gridVideoRenderer', {}) if _item: _video_item = {'id': _item['videoId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if 'offset' in _result and _result['offset'] >= 100: _result['offset'] -= 100 if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result return _perform(_page_token=page_token, _offset=offset, _result=result) def get_purchases(self, page_token, offset): if not page_token: page_token = '' shelf_title = 'Purchases' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result, _shelf_index=None): _post_data = { 'context': { 'client': { 
'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } } } if _page_token: _post_data['continuation'] = _page_token else: _post_data['browseId'] = 'FEmy_youtube' _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = {} if 'continuationContents' in _json_data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) elif 'contents' in _json_data: _contents = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}]) if _shelf_index is None: _shelf_index = get_shelf_index_by_title(_context, _json_data, shelf_title) if _shelf_index is not None: _data = _contents[_shelf_index].get('shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _listItem in _items: _item = _listItem.get('gridVideoRenderer', {}) if _item: _video_item = {'id': _item['videoId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) _item = _listItem.get('gridPlaylistRenderer', {}) if _item: play_next_page_token = '' while True: json_playlist_data = self.get_playlist_items(_item['playlistId'], page_token=play_next_page_token) _playListItems = json_playlist_data.get('items', {}) for _playListItem in _playListItems: _playListItem = _playListItem.get('snippet', {}) if _playListItem: _video_item = {'id': _playListItem.get('resourceId', {}).get('videoId', ''), 'title': _playListItem['title'], 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} 
_result['items'].append(_video_item) play_next_page_token = json_playlist_data.get('nextPageToken', '') if not play_next_page_token or _context.abort_requested(): break _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result, _shelf_index=shelf_index) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result shelf_index = None if self._language != 'en' and not self._language.startswith('en-') and not page_token: # shelf index is a moving target, make a request in english first to find the correct index by title _en_post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': 'US', 'acceptLanguage': 'en-US' }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEmy_youtube' } json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_en_post_data) shelf_index = get_shelf_index_by_title(_context, json_data, shelf_title) result = _perform(_page_token=page_token, _offset=offset, _result=result, _shelf_index=shelf_index) return result def get_saved_playlists(self, page_token, offset): if not page_token: page_token = '' shelf_title = 'Saved Playlists' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result, _shelf_index=None): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': 
'5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } } } if _page_token: _post_data['continuation'] = _page_token else: _post_data['browseId'] = 'FEmy_youtube' _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = {} if 'continuationContents' in _json_data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) elif 'contents' in _json_data: _contents = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}]) if _shelf_index is None: _shelf_index = get_shelf_index_by_title(_context, _json_data, shelf_title) if _shelf_index is not None: _data = _contents[_shelf_index].get('shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _item in _items: _item = _item.get('gridPlaylistRenderer', {}) if _item: _video_item = {'id': _item['playlistId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', ''), 'channel_id': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('navigationEndpoint', {}).get('browseEndpoint', {}).get('browseId', ''), 'thumbnails': {'default': {'url': ''}, 'medium': {'url': ''}, 'high': {'url': ''}}} _thumbs = _item.get('thumbnail', {}).get('thumbnails', [{}]) for _thumb in _thumbs: _thumb_url = _thumb.get('url', '') if _thumb_url.startswith('//'): _thumb_url = ''.join(['https:', _thumb_url]) if _thumb_url.endswith('/default.jpg'): _video_item['thumbnails']['default']['url'] = _thumb_url elif _thumb_url.endswith('/mqdefault.jpg'): _video_item['thumbnails']['medium']['url'] = _thumb_url elif 
_thumb_url.endswith('/hqdefault.jpg'): _video_item['thumbnails']['high']['url'] = _thumb_url _result['items'].append(_video_item) _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result, _shelf_index=_shelf_index) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result shelf_index = None if self._language != 'en' and not self._language.startswith('en-') and not page_token: # shelf index is a moving target, make a request in english first to find the correct index by title _en_post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': 'US', 'acceptLanguage': 'en-US' }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEmy_youtube' } json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_en_post_data) shelf_index = get_shelf_index_by_title(_context, json_data, shelf_title) result = _perform(_page_token=page_token, _offset=offset, _result=result, _shelf_index=shelf_index) return result def clear_watch_history(self): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } } } _json_data = self.perform_v1_tv_request(method='POST', path='history/clear_watch_history', 
post_data=_post_data) return _json_data def get_watch_history(self, page_token=None, offset=0): if not page_token: page_token = '' result = {'items': [], 'next_page_token': page_token, 'offset': offset} def _perform(_page_token, _offset, _result): _post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': '%s' % self._region, 'acceptLanguage': '%s' % self._language.replace('_', '-') }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'FEhistory' } if _page_token: _post_data['continuation'] = _page_token _json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data) _data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get( 'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {}) if not _data: _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {}) _items = _data.get('items', []) if not _result: _result = {'items': []} _new_offset = self._max_results - len(_result['items']) + _offset if _offset > 0: _items = _items[_offset:] _result['offset'] = _new_offset for _item in _items: _item = _item.get('gridVideoRenderer', {}) if _item: _video_item = {'id': _item['videoId'], 'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''), 'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')} _result['items'].append(_video_item) _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '') if _continuations and len(_result['items']) <= self._max_results: _result['next_page_token'] = _continuations if len(_result['items']) < self._max_results: _result = _perform(_page_token=_continuations, _offset=0, _result=_result) # trim result if len(_result['items']) > self._max_results: _items = _result['items'] _items = _items[:self._max_results] _result['items'] = _items _result['continue'] = True if 
len(_result['items']) < self._max_results: if 'continue' in _result: del _result['continue'] if 'next_page_token' in _result: del _result['next_page_token'] if 'offset' in _result: del _result['offset'] return _result return _perform(_page_token=page_token, _offset=offset, _result=result) def get_watch_later_id(self): watch_later_id = '' def _get_items(_continuation=None): post_data = { 'context': { 'client': { 'clientName': 'TVHTML5', 'clientVersion': '5.20150304', 'theme': 'CLASSIC', 'acceptRegion': 'US', 'acceptLanguage': 'en-US' }, 'user': { 'enableSafetyMode': False } }, 'browseId': 'default' } if _continuation: post_data['continuation'] = _continuation return self.perform_v1_tv_request(method='POST', path='browse', post_data=post_data) current_page = 1 pages = 30 # 28 seems to be page limit, add a couple page padding, loop will break when there is no next page data progress_dialog = _context.get_ui().create_progress_dialog(_context.get_name(), _context.localize(constants.localize.COMMON_PLEASE_WAIT), background=True) progress_dialog.set_total(pages) progress_dialog.update(steps=1, text=_context.localize(constants.localize.WATCH_LATER_RETRIEVAL_PAGE) % str(current_page)) try: json_data = _get_items() while current_page < pages: contents = json_data.get('contents', json_data.get('continuationContents', {})) section = contents.get('sectionListRenderer', contents.get('sectionListContinuation', {})) contents = section.get('contents', [{}]) for shelf in contents: renderer = shelf.get('shelfRenderer', {}) endpoint = renderer.get('endpoint', {}) browse_endpoint = endpoint.get('browseEndpoint', {}) browse_id = browse_endpoint.get('browseId', '') title = renderer.get('title', {}) title_runs = title.get('runs', [{}])[0] title_text = title_runs.get('text', '') if (title_text.lower() == 'watch later') and (browse_id.startswith('VLPL') or browse_id.startswith('PL')): watch_later_id = browse_id.lstrip('VL') break if watch_later_id: break continuations = 
section.get('continuations', [{}])[0] next_continuation_data = continuations.get('nextContinuationData', {}) continuation = next_continuation_data.get('continuation', '') if continuation: current_page += 1 progress_dialog.update(steps=1, text=_context.localize(constants.localize.WATCH_LATER_RETRIEVAL_PAGE) % str(current_page)) json_data = _get_items(continuation) continue else: break finally: progress_dialog.close() return watch_later_id def perform_v3_request(self, method='GET', headers=None, path=None, post_data=None, params=None, allow_redirects=True, no_login=False): yt_config = self._config if not yt_config.get('key'): return { 'error': { 'errors': [{'reason': 'accessNotConfigured'}], 'message': 'No API keys provided' } } # params if not params: params = {} _params = {'key': yt_config['key']} _params.update(params) # headers if not headers: headers = {} _headers = {'Host': 'www.googleapis.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36', 'Accept-Encoding': 'gzip, deflate'} # a config can decide if a token is allowed if self._access_token and yt_config.get('token-allowed', True) and not no_login: _headers['Authorization'] = 'Bearer %s' % self._access_token _headers.update(headers) # url _url = 'https://www.googleapis.com/youtube/v3/%s' % path.strip('/') result = None log_params = copy.deepcopy(params) if 'location' in log_params: log_params['location'] = 'xx.xxxx,xx.xxxx' _context.log_debug('[data] v3 request: |{0}| path: |{1}| params: |{2}| post_data: |{3}|'.format(method, path, log_params, post_data)) if method == 'GET': result = requests.get(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'POST': _headers['content-type'] = 'application/json' result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'PUT': _headers['content-type'] 
= 'application/json' result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'DELETE': result = requests.delete(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) if result is None: return {} if result.headers.get('content-type', '').startswith('application/json'): return result.json() def perform_v1_tv_request(self, method='GET', headers=None, path=None, post_data=None, params=None, allow_redirects=True): # params if not params: params = {} _params = {'key': self._config_tv['key']} _params.update(params) # headers if not headers: headers = {} _headers = {'Host': 'www.googleapis.com', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36', 'Origin': 'https://www.youtube.com', 'Accept': '*/*', 'DNT': '1', 'Referer': 'https://www.youtube.com/tv', 'Accept-Encoding': 'gzip', 'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'} if self._access_token_tv: _headers['Authorization'] = 'Bearer %s' % self._access_token_tv _headers.update(headers) # url _url = 'https://www.googleapis.com/youtubei/v1/%s' % path.strip('/') result = None _context.log_debug('[i] v1 request: |{0}| path: |{1}| params: |{2}| post_data: |{3}|'.format(method, path, params, post_data)) if method == 'GET': result = requests.get(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'POST': _headers['content-type'] = 'application/json' result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'PUT': _headers['content-type'] = 'application/json' result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) elif method == 'DELETE': result = requests.delete(_url, 
params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects) if result is None: return {} if result.headers.get('content-type', '').startswith('application/json'): return result.json()
true
true
1c359333395c19ed322fc04a147f52e90640146d
574
py
Python
runtests.py
lworkneh/NS
560145f5f1e4f0a5a3690470939a9cd48d9421bf
[ "MIT", "Unlicense" ]
1
2016-02-13T15:41:53.000Z
2016-02-13T15:41:53.000Z
runtests.py
lworkneh/NS
560145f5f1e4f0a5a3690470939a9cd48d9421bf
[ "MIT", "Unlicense" ]
6
2020-06-05T19:26:57.000Z
2022-03-11T23:33:08.000Z
runtests.py
lworkneh/NS
560145f5f1e4f0a5a3690470939a9cd48d9421bf
[ "MIT", "Unlicense" ]
null
null
null
import os import sys os.environ['DJANGO_SETTINGS_MODULE'] = 'test-settings' test_dir = os.path.dirname(__file__) sys.path.insert(0, test_dir) import django from django.test.utils import get_runner from django.conf import settings def runtests(): if django.VERSION >= (1, 7): django.setup() TestRunner = get_runner(settings) test_runner = TestRunner(verbosity=1, interactive=True) failures = test_runner.run_tests( ['quiz', 'essay', 'multichoice', 'true_false'] ) sys.exit(bool(failures)) if __name__ == '__main__': runtests()
23.916667
59
0.700348
import os import sys os.environ['DJANGO_SETTINGS_MODULE'] = 'test-settings' test_dir = os.path.dirname(__file__) sys.path.insert(0, test_dir) import django from django.test.utils import get_runner from django.conf import settings def runtests(): if django.VERSION >= (1, 7): django.setup() TestRunner = get_runner(settings) test_runner = TestRunner(verbosity=1, interactive=True) failures = test_runner.run_tests( ['quiz', 'essay', 'multichoice', 'true_false'] ) sys.exit(bool(failures)) if __name__ == '__main__': runtests()
true
true
1c3594191edf0b0fdc9949767ff6f24f088bd74f
1,124
py
Python
WEB SCRAPING PYTHON/webscraping4.py
nihilboy1455/CURSO-WEB-SCRAPING-PYTHON
c76f196a3ac13ddd36142b047880e3b2339c333f
[ "MIT" ]
null
null
null
WEB SCRAPING PYTHON/webscraping4.py
nihilboy1455/CURSO-WEB-SCRAPING-PYTHON
c76f196a3ac13ddd36142b047880e3b2339c333f
[ "MIT" ]
null
null
null
WEB SCRAPING PYTHON/webscraping4.py
nihilboy1455/CURSO-WEB-SCRAPING-PYTHON
c76f196a3ac13ddd36142b047880e3b2339c333f
[ "MIT" ]
null
null
null
# > EXEMPLO # - Obtendo produtos do Mercado Livre a partir de uma busca realizada pelo usuário import requests from bs4 import BeautifulSoup url_base = 'https://lista.mercadolivre.com.br/' produto_nome = input('Qual produto você deseja? ') response = requests.get(url_base + produto_nome) site = BeautifulSoup(response.text, 'html.parser') produtos = site.findAll('div', attrs={'class': 'andes-card andes-card--flat andes-card--default ui-search-result ui-search-result--core andes-card--padding-default'}) for produto in produtos: titulo = produto.find('h2', attrs={'class': 'ui-search-item__title'}) link = produto.find('a', attrs={'class': 'ui-search-link'}) real = produto.find('span', attrs={'class': 'price-tag-fraction'}) centavos = produto.find('span', attrs={'class': 'price-tag-cents'}) print(produto.prettify()) print('Título do produto:', titulo.text) print('Link do produto:', link['href']) if (centavos): print('Preço do produto: R$', real.text + ',' + centavos.text) else: print('Preço do produto: R$', real.text) print('\n\n') break
32.114286
166
0.672598
import requests from bs4 import BeautifulSoup url_base = 'https://lista.mercadolivre.com.br/' produto_nome = input('Qual produto você deseja? ') response = requests.get(url_base + produto_nome) site = BeautifulSoup(response.text, 'html.parser') produtos = site.findAll('div', attrs={'class': 'andes-card andes-card--flat andes-card--default ui-search-result ui-search-result--core andes-card--padding-default'}) for produto in produtos: titulo = produto.find('h2', attrs={'class': 'ui-search-item__title'}) link = produto.find('a', attrs={'class': 'ui-search-link'}) real = produto.find('span', attrs={'class': 'price-tag-fraction'}) centavos = produto.find('span', attrs={'class': 'price-tag-cents'}) print(produto.prettify()) print('Título do produto:', titulo.text) print('Link do produto:', link['href']) if (centavos): print('Preço do produto: R$', real.text + ',' + centavos.text) else: print('Preço do produto: R$', real.text) print('\n\n') break
true
true
1c35945136e0c8709847e01b2b465d1a75b5ecf0
3,481
py
Python
data/process_data.py
GauravKParmar/Disaster-Response-Pipeline
740aaa9e2062662841add2daa981d5177abda021
[ "MIT" ]
null
null
null
data/process_data.py
GauravKParmar/Disaster-Response-Pipeline
740aaa9e2062662841add2daa981d5177abda021
[ "MIT" ]
null
null
null
data/process_data.py
GauravKParmar/Disaster-Response-Pipeline
740aaa9e2062662841add2daa981d5177abda021
[ "MIT" ]
null
null
null
import sys import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): """ Loads data from given filepaths and merges them. Parameters: messages_filepath (str): messages filepath categories_filepath (str): categories filepath Returns: df (DataFrame): combined dataframe """ # reading csv files using pandas messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) # merging the two dataframes df = pd.merge(messages, categories, on="id") return df def clean_data(df): """ Creates new individual category columns by splitting values from categories column and drops duplicate rows. Parameters: df (DataFrame): dataframe to be cleaned. Returns: df (DataFrame): cleaned dataframe with new columns. """ # creating a dataframe of the 36 individual category columns categories = df['categories'].str.split(';', expand=True) # selecting the first row of the categories dataframe to extract column names row = categories.iloc[0,:] # creating a list of category column names category_colnames = row.apply(lambda x : x[:-2]) # renaming the columns of 'categories' dataframe categories.columns = category_colnames for column in categories: # set each value to be the last character of the string categories[column] = categories[column].astype(str).str[-1] # convert column from string to numeric categories[column] = pd.to_numeric(categories[column]) # replacing categories column in df with new category columns. df.drop('categories', inplace=True, axis=1) # concatenating the original dataframe with the new 'categories' dataframe df = pd.concat([df, categories], axis=1) # removing duplicates df.drop_duplicates(inplace=True) return df def save_data(df, database_filename): """ Saves the dataframe to a database. Parameters: df (DataFrame): dataframe to be stored. database_filename (str) : database filename. 
""" # initiating SQLAlchemy Engine engine = create_engine('sqlite:///'+database_filename) # using pandas to save the DataFrame to the database df.to_sql(database_filename[:-3], engine, index=False) def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of the messages and categories '\ 'datasets as the first and second argument respectively, as '\ 'well as the filepath of the database to save the cleaned data '\ 'to as the third argument. \n\nExample: python process_data.py '\ 'disaster_messages.csv disaster_categories.csv '\ 'DisasterResponse.db') if __name__ == '__main__': main()
29.752137
81
0.652111
import sys import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) df = pd.merge(messages, categories, on="id") return df def clean_data(df): categories = df['categories'].str.split(';', expand=True) row = categories.iloc[0,:] category_colnames = row.apply(lambda x : x[:-2]) categories.columns = category_colnames for column in categories: categories[column] = categories[column].astype(str).str[-1] categories[column] = pd.to_numeric(categories[column]) df.drop('categories', inplace=True, axis=1) df = pd.concat([df, categories], axis=1) df.drop_duplicates(inplace=True) return df def save_data(df, database_filename): engine = create_engine('sqlite:///'+database_filename) df.to_sql(database_filename[:-3], engine, index=False) def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of the messages and categories '\ 'datasets as the first and second argument respectively, as '\ 'well as the filepath of the database to save the cleaned data '\ 'to as the third argument. \n\nExample: python process_data.py '\ 'disaster_messages.csv disaster_categories.csv '\ 'DisasterResponse.db') if __name__ == '__main__': main()
true
true