| hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2 classes | is_sharp_comment_removed bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
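
A minimal sketch of loading a slice like this for inspection, assuming the rows are stored in a parquet file (the filename stack_slice.parquet is hypothetical):

import pandas as pd

# Read the slice and print one row's metadata plus the start of its file.
df = pd.read_parquet("stack_slice.parquet")  # hypothetical path
for _, row in df.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])  # first 200 characters of the source file
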
f7fd9571c1ea46ab8032fc16bb1afb0ea68e13c1 | 943 | py | Python | gans/experiments/emnist/preprocessing/filtered_emnist_data_utils_test.py | garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | ["Apache-2.0"] | 330 | 2020-09-14T23:10:16.000Z | 2022-03-30T19:49:19.000Z | gans/experiments/emnist/preprocessing/filtered_emnist_data_utils_test.py | garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | ["Apache-2.0"] | 52 | 2020-09-30T06:10:51.000Z | 2022-03-31T19:25:16.000Z | gans/experiments/emnist/preprocessing/filtered_emnist_data_utils_test.py | garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | ["Apache-2.0"] | 119 | 2020-09-24T04:54:46.000Z | 2022-03-31T21:46:57.000Z |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf

from gans.experiments.emnist.preprocessing import filtered_emnist_data_utils


class FilteredEmnistDataUtilsTest(tf.test.TestCase):

  def test_get_filtered_client_data_for_training(self):
    # The test passes as long as the call below completes without raising.
    filtered_emnist_data_utils.get_filtered_client_data_for_training(
        None, None, batch_size=10)


if __name__ == '__main__':
  tf.test.main()
| 32.517241 | 76 | 0.778367 | true | true |
f7fd95c4fa958dae0d1b4c43138630a897afb4e9 | 4,645 | py | Python | python3/erfr-keygen.py | urbanware-org/erfr | 3f82f42092923b6f03597a54a4d3a8e1cb771d61 | ["MIT"] | 2 | 2018-04-20T20:13:25.000Z | 2018-06-18T18:37:16.000Z | python3/erfr-keygen.py | urbanware-org/erfr | 3f82f42092923b6f03597a54a4d3a8e1cb771d61 | ["MIT"] | null | null | null | python3/erfr-keygen.py | urbanware-org/erfr | 3f82f42092923b6f03597a54a4d3a8e1cb771d61 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================
# Erfr - One-time pad encryption tool
# Key Generator script
# Copyright (C) 2018 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# Website: http://www.urbanware.org
# GitHub: https://github.com/urbanware-org/erfr
# ============================================================================
import os
import sys
def main():
from core import clap
from core import common
from core import keyfile
from datetime import datetime as dt
try:
p = clap.Parser()
except Exception as e:
print("%s: error: %s" % (os.path.basename(sys.argv[0]), e))
sys.exit(1)
p.set_description("Generate key files which can either be used for " \
"encryption or obfuscation purposes (as fake key " \
"files).")
p.set_epilog("Further information and usage examples can be found " \
"inside the documentation file for this script.")
# Define required arguments
p.add_avalue("-s", "--key-size", "key size in bytes", "key_size", None,
True)
# Define optional arguments
p.add_switch(None, "--base64", "generate Base64 key string", "base64",
True, False)
p.add_avalue("-b", "--buffer-size", "buffer size in bytes", "buffer_size",
4096, False)
p.add_switch(None, "--dev-random", "use \"/dev/random\" as random " \
"number generator (Unix-like systems only)", "dev_random",
True, False)
p.add_switch(None, "--fortuna", "use Fortuna as random number generator",
"fortuna", True, False)
p.add_switch("-h", "--help", "print this help message and exit", None,
True, False)
p.add_avalue("-k", "--key-file", "key file path", "key_file", None, False)
p.add_switch(None, "--overwrite", "overwrite existing file", "overwrite",
True, False)
p.add_avalue("-p", "--parts", "split key into separate parts", "parts", 1,
False)
p.add_avalue("-t", "--task-id", "user-defined task ID", "task_id", None,
False)
p.add_switch(None, "--version", "print the version number and exit", None,
True, False)
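    # Example invocations, based on the argument definitions above ("-s" is
    # the only required argument):
    #
    #   erfr-keygen.py -s 1024 -k secret.key        # 1024-byte key file
    #   erfr-keygen.py -s 32 --base64               # Base64-encoded key string
    #   erfr-keygen.py -s 1024 -k secret.key -p 4   # key file split into 4 parts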
if len(sys.argv) == 1:
p.error("At least one required argument is missing.")
elif ("-h" in sys.argv) or ("--help" in sys.argv):
p.print_help()
sys.exit(0)
elif "--version" in sys.argv:
print(keyfile.get_version())
sys.exit(0)
args = p.parse_args()
    if not args.base64 and args.key_file is None:
        p.error("The argument to either generate a key file or a Base64 " \
                "encoded key string is missing.")
    elif args.base64 and args.key_file is not None:
        p.error("The arguments to generate a key file and a Base64 key " \
                "string cannot be given at the same time.")
elif args.base64 and args.overwrite:
p.error("The overwrite argument does not make any sense when " \
"generating a Base64 string.")
elif args.base64 and args.parts > 1:
p.error("The parts argument does not make any sense when " \
"generating a Base64 string.")
if args.base64:
        if args.task_id is not None:
p.error("No task ID can be given when creating a Base64 key " \
"string.")
try:
print(keyfile.generate_key_string(args.key_size, args.dev_random,
args.fortuna))
except Exception as e:
p.error(e)
else:
try:
task_id = common.get_task_id(args.task_id)
except Exception as e:
task_id = args.task_id
p.error(e)
try:
timestamp = dt.now()
common.status(task_id, "key generation", "start")
keyfile.generate_key_file(task_id, args.key_file, args.key_size,
args.buffer_size, 0, False,
args.dev_random, args.fortuna,
args.overwrite, args.parts)
common.status(task_id, "key generation", "finish")
print("Elapsed time: %s" % (dt.now() - timestamp))
except Exception as e:
common.status(task_id, "key generation", "cancel")
p.error(e)
finally:
common.delete_temp_files(task_id)
if __name__ == "__main__":
main()
# EOF
| 38.708333 | 78 | 0.54704 | true | true |
f7fd95cc368ddbd403306e4b95518d6b994b0ad0 | 984 | py | Python | django_event_store/client.py | Magni77/django-event-store | cc52d4748fced42df0cc49f8a4f72368595809df | ["MIT"] | 4 | 2021-11-24T20:51:11.000Z | 2021-11-25T15:37:38.000Z | django_event_store/client.py | mgodkowicz/django-event-store | cc52d4748fced42df0cc49f8a4f72368595809df | ["MIT"] | 1 | 2021-11-25T08:15:37.000Z | 2021-11-25T08:15:37.000Z | django_event_store/client.py | mgodkowicz/django-event-store | cc52d4748fced42df0cc49f8a4f72368595809df | ["MIT"] | null | null | null |
from datetime import datetime
from typing import Callable, Optional
from django_event_store.event_repository import DjangoEventRepository
from event_store import Client as EsClient
from event_store import Dispatcher, EventsRepository, Subscriptions
from event_store.dispatcher import DispatcherBase
from event_store.mappers.default import Default
from event_store.mappers.pipeline_mapper import PipelineMapper
class Client(EsClient):
def __init__(
self,
repository: Optional[EventsRepository] = None,
subscriptions: Optional[Subscriptions] = None,
dispatcher: DispatcherBase = Dispatcher(),
mapper: Optional[PipelineMapper] = None,
clock: Callable = datetime.now,
):
super().__init__(
repository=repository or DjangoEventRepository(),
subscriptions=subscriptions or Subscriptions(),
dispatcher=dispatcher,
mapper=mapper or Default(),
clock=clock,
)
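
# Usage sketch: with no arguments, Client() falls back to the Django-backed
# defaults wired above; any dependency can be swapped through the constructor.
# The fixed_clock helper below is illustrative and not part of the package:
#
#     def fixed_clock():
#         return datetime(2021, 11, 24, 12, 0)
#
#     default_client = Client()                 # Django repository + defaults
#     test_client = Client(clock=fixed_clock)   # deterministic clock for tests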
| 35.142857 | 69 | 0.723577 | true | true |
f7fd962d4d49ee1ab3ab47e910296055c7268246 | 64,380 | py | Python | python/ccxt/kraken.py | jknight/ccxt | 02cdef0247435a6c6557faad8a1793d3da67c085 | ["MIT"] | null | null | null | python/ccxt/kraken.py | jknight/ccxt | 02cdef0247435a6c6557faad8a1793d3da67c085 | ["MIT"] | null | null | null | python/ccxt/kraken.py | jknight/ccxt | 02cdef0247435a6c6557faad8a1793d3da67c085 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import base64
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class kraken(Exchange):
def describe(self):
return self.deep_extend(super(kraken, self).describe(), {
'id': 'kraken',
'name': 'Kraken',
'countries': ['US'],
'version': '0',
'rateLimit': 3000,
'certified': True,
'pro': True,
'has': {
'createDepositAddress': True,
'fetchDepositAddress': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'CORS': False,
'fetchCurrencies': True,
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchWithdrawals': True,
'fetchDeposits': True,
'withdraw': True,
'fetchLedgerEntry': True,
'fetchLedger': True,
'fetchOrderTrades': 'emulated',
'fetchTime': True,
},
'marketsByAltname': {},
'timeframes': {
'1m': 1,
'5m': 5,
'15m': 15,
'30m': 30,
'1h': 60,
'4h': 240,
'1d': 1440,
'1w': 10080,
'2w': 21600,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/76173629-fc67fb00-61b1-11ea-84fe-f2de582f58a3.jpg',
'api': {
'public': 'https://api.kraken.com',
'private': 'https://api.kraken.com',
'zendesk': 'https://kraken.zendesk.com/api/v2/help_center/en-us/articles', # use the public zendesk api to receive article bodies and bypass new anti-spam protections
},
'www': 'https://www.kraken.com',
'doc': 'https://www.kraken.com/features/api',
'fees': 'https://www.kraken.com/en-us/features/fee-schedule',
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.26 / 100,
'maker': 0.16 / 100,
'tiers': {
'taker': [
[0, 0.0026],
[50000, 0.0024],
[100000, 0.0022],
[250000, 0.0020],
[500000, 0.0018],
[1000000, 0.0016],
[2500000, 0.0014],
[5000000, 0.0012],
[10000000, 0.0001],
],
'maker': [
[0, 0.0016],
[50000, 0.0014],
[100000, 0.0012],
[250000, 0.0010],
[500000, 0.0008],
[1000000, 0.0006],
[2500000, 0.0004],
[5000000, 0.0002],
[10000000, 0.0],
],
},
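                    # Worked example: a 30-day traded volume of 120000 USD
                    # falls into the [100000, 250000) tier above, giving a
                    # 0.22% taker fee and a 0.12% maker fee (see
                    # fetch_trading_fees below for the tier lookup).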
},
                # hardcoding fees like this is a bad idea, since the fees change on a daily basis
                # hardcoded fees are now considered obsolete, we will remove all of them eventually
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'ETH': 0.005,
'XRP': 0.02,
'XLM': 0.00002,
'LTC': 0.02,
'DOGE': 2,
'ZEC': 0.00010,
'ICN': 0.02,
'REP': 0.01,
'ETC': 0.005,
'MLN': 0.003,
'XMR': 0.05,
'DASH': 0.005,
'GNO': 0.01,
'EOS': 0.5,
'BCH': 0.001,
'XTZ': 0.05,
'USD': 5, # if domestic wire
'EUR': 5, # if domestic wire
'CAD': 10, # CAD EFT Withdrawal
'JPY': 300, # if domestic wire
},
'deposit': {
'BTC': 0,
'ETH': 0,
'XRP': 0,
'XLM': 0,
'LTC': 0,
'DOGE': 0,
'ZEC': 0,
'ICN': 0,
'REP': 0,
'ETC': 0,
'MLN': 0,
'XMR': 0,
'DASH': 0,
'GNO': 0,
'EOS': 0,
'BCH': 0,
'XTZ': 0.05,
'USD': 5, # if domestic wire
'EUR': 0, # free deposit if EUR SEPA Deposit
'CAD': 5, # if domestic wire
'JPY': 0, # Domestic Deposit(Free, ¥5,000 deposit minimum)
},
},
},
'api': {
'zendesk': {
'get': [
# we should really refrain from putting fixed fee numbers and stop hardcoding
# we will be using their web APIs to scrape all numbers from these articles
'205893708', # -What-is-the-minimum-order-size-
'360000292886', # -What-are-the-deposit-fees-
'201893608', # -What-are-the-withdrawal-fees-
],
},
'public': {
'get': [
'Assets',
'AssetPairs',
'Depth',
'OHLC',
'Spread',
'Ticker',
'Time',
'Trades',
],
},
'private': {
'post': [
'AddOrder',
'AddExport',
'Balance',
'CancelOrder',
'ClosedOrders',
'DepositAddresses',
'DepositMethods',
'DepositStatus',
'ExportStatus',
'GetWebSocketsToken',
'Ledgers',
'OpenOrders',
'OpenPositions',
'QueryLedgers',
'QueryOrders',
'QueryTrades',
'RetrieveExport',
'RemoveExport',
'TradeBalance',
'TradesHistory',
'TradeVolume',
'Withdraw',
'WithdrawCancel',
'WithdrawInfo',
'WithdrawStatus',
],
},
},
'commonCurrencies': {
'XBT': 'BTC',
'XDG': 'DOGE',
},
'options': {
'cacheDepositMethodsOnFetchDepositAddress': True, # will issue up to two calls in fetchDepositAddress
'depositMethods': {},
'delistedMarketsById': {},
# cannot withdraw/deposit these
'inactiveCurrencies': ['CAD', 'USD', 'JPY', 'GBP'],
'fetchMinOrderAmounts': True,
},
'exceptions': {
'EQuery:Invalid asset pair': BadSymbol, # {"error":["EQuery:Invalid asset pair"]}
'EAPI:Invalid key': AuthenticationError,
'EFunding:Unknown withdraw key': ExchangeError,
'EFunding:Invalid amount': InsufficientFunds,
'EService:Unavailable': ExchangeNotAvailable,
'EDatabase:Internal error': ExchangeNotAvailable,
'EService:Busy': ExchangeNotAvailable,
'EQuery:Unknown asset': ExchangeError,
'EAPI:Rate limit exceeded': DDoSProtection,
'EOrder:Rate limit exceeded': DDoSProtection,
'EGeneral:Internal error': ExchangeNotAvailable,
'EGeneral:Temporary lockout': DDoSProtection,
'EGeneral:Permission denied': PermissionDenied,
},
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def fetch_min_order_amounts(self, params={}):
response = self.zendeskGet205893708(params)
article = self.safe_value(response, 'article')
html = self.safe_string(article, 'body')
parts = html.split('<td class="wysiwyg-text-align-right">')
numParts = len(parts)
if numParts < 3:
raise NotSupported(self.id + ' fetchMinOrderAmounts HTML page markup has changed: https://kraken.zendesk.com/api/v2/help_center/en-us/articles/205893708')
result = {}
# skip the part before the header and the header itself
for i in range(2, len(parts)):
part = parts[i]
chunks = part.split('</td>')
amountAndCode = chunks[0]
if amountAndCode != 'To Be Announced':
pieces = amountAndCode.split(' ')
numPieces = len(pieces)
if numPieces == 2:
amount = float(pieces[0])
code = self.safe_currency_code(pieces[1])
result[code] = amount
return result
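    # Example: a table cell like "0.002 XBT" splits into ['0.002', 'XBT'] and
    # is stored as result['BTC'] = 0.002 after unified currency-code mapping.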
def fetch_markets(self, params={}):
response = self.publicGetAssetPairs(params)
#
# {
# "error":[],
# "result":{
# "ADAETH":{
# "altname":"ADAETH",
# "wsname":"ADA\/ETH",
# "aclass_base":"currency",
# "base":"ADA",
# "aclass_quote":"currency",
# "quote":"XETH",
# "lot":"unit",
# "pair_decimals":7,
# "lot_decimals":8,
# "lot_multiplier":1,
# "leverage_buy":[],
# "leverage_sell":[],
# "fees":[
# [0,0.26],
# [50000,0.24],
# [100000,0.22],
# [250000,0.2],
# [500000,0.18],
# [1000000,0.16],
# [2500000,0.14],
# [5000000,0.12],
# [10000000,0.1]
# ],
# "fees_maker":[
# [0,0.16],
# [50000,0.14],
# [100000,0.12],
# [250000,0.1],
# [500000,0.08],
# [1000000,0.06],
# [2500000,0.04],
# [5000000,0.02],
# [10000000,0]
# ],
# "fee_volume_currency":"ZUSD",
# "margin_call":80,
# "margin_stop":40
# },
# }
# }
#
fetchMinOrderAmounts = self.safe_value(self.options, 'fetchMinOrderAmounts', False)
limits = {}
if fetchMinOrderAmounts:
limits = self.fetch_min_order_amounts()
keys = list(response['result'].keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = response['result'][id]
baseId = market['base']
quoteId = market['quote']
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
darkpool = id.find('.d') >= 0
symbol = market['altname'] if darkpool else (base + '/' + quote)
maker = None
if 'fees_maker' in market:
maker = float(market['fees_maker'][0][1]) / 100
precision = {
'amount': market['lot_decimals'],
'price': market['pair_decimals'],
}
minAmount = math.pow(10, -precision['amount'])
if base in limits:
minAmount = limits[base]
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'darkpool': darkpool,
'info': market,
'altname': market['altname'],
'maker': maker,
'taker': float(market['fees'][0][1]) / 100,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
})
result = self.append_inactive_markets(result)
self.marketsByAltname = self.index_by(result, 'altname')
return result
def safe_currency_code(self, currencyId, currency=None):
if len(currencyId) > 3:
if (currencyId.find('X') == 0) or (currencyId.find('Z') == 0):
currencyId = currencyId[1:]
return super(kraken, self).safe_currency_code(currencyId, currency)
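    # Example: ids longer than 3 characters with a leading 'X' or 'Z', such
    # as 'XXBT' or 'ZUSD', are stripped to 'XBT'/'USD' before the base-class
    # lookup, which then maps 'XBT' to the unified 'BTC' via commonCurrencies.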
def append_inactive_markets(self, result):
# result should be an array to append to
precision = {'amount': 8, 'price': 8}
costLimits = {'min': 0, 'max': None}
priceLimits = {'min': math.pow(10, -precision['price']), 'max': None}
amountLimits = {'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount'])}
limits = {'amount': amountLimits, 'price': priceLimits, 'cost': costLimits}
defaults = {
'darkpool': False,
'info': None,
'maker': None,
'taker': None,
'active': False,
'precision': precision,
'limits': limits,
}
markets = [
# {'id': 'XXLMZEUR', 'symbol': 'XLM/EUR', 'base': 'XLM', 'quote': 'EUR', 'altname': 'XLMEUR'},
]
for i in range(0, len(markets)):
result.append(self.extend(defaults, markets[i]))
return result
def fetch_currencies(self, params={}):
response = self.publicGetAssets(params)
#
# {
# "error": [],
# "result": {
# "ADA": {"aclass": "currency", "altname": "ADA", "decimals": 8, "display_decimals": 6},
# "BCH": {"aclass": "currency", "altname": "BCH", "decimals": 10, "display_decimals": 5},
# ...
# },
# }
#
currencies = self.safe_value(response, 'result')
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
# todo: will need to rethink the fees
# see: https://support.kraken.com/hc/en-us/articles/201893608-What-are-the-withdrawal-fees-
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.safe_currency_code(self.safe_string(currency, 'altname'))
precision = self.safe_integer(currency, 'decimals')
# assumes all currencies are active except those listed above
active = not self.in_array(code, self.options['inactiveCurrencies'])
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
def fetch_trading_fees(self, params={}):
self.load_markets()
self.check_required_credentials()
response = self.privatePostTradeVolume(params)
tradedVolume = self.safe_float(response['result'], 'volume')
tiers = self.fees['trading']['tiers']
taker = tiers['taker'][1]
maker = tiers['maker'][1]
for i in range(0, len(tiers['taker'])):
if tradedVolume >= tiers['taker'][i][0]:
taker = tiers['taker'][i][1]
for i in range(0, len(tiers['maker'])):
if tradedVolume >= tiers['maker'][i][0]:
maker = tiers['maker'][i][1]
return {
'info': response,
'maker': maker,
'taker': taker,
}
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
price = self.safe_float(bidask, priceKey)
amount = self.safe_float(bidask, amountKey)
timestamp = self.safe_integer(bidask, 2)
return [price, amount, timestamp]
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if market['darkpool']:
raise ExchangeError(self.id + ' does not provide an order book for darkpool symbol ' + symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['count'] = limit # 100
response = self.publicGetDepth(self.extend(request, params))
#
# {
# "error":[],
# "result":{
# "XETHXXBT":{
# "asks":[
# ["0.023480","4.000",1586321307],
# ["0.023490","50.095",1586321306],
# ["0.023500","28.535",1586321302],
# ],
# "bids":[
# ["0.023470","59.580",1586321307],
# ["0.023460","20.000",1586321301],
# ["0.023440","67.832",1586321306],
# ]
# }
# }
# }
#
result = self.safe_value(response, 'result', {})
orderbook = self.safe_value(result, market['id'])
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
baseVolume = float(ticker['v'][1])
vwap = float(ticker['p'][1])
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = float(ticker['c'][0])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['h'][1]),
'low': float(ticker['l'][1]),
'bid': float(ticker['b'][0]),
'bidVolume': None,
'ask': float(ticker['a'][0]),
'askVolume': None,
'vwap': vwap,
'open': self.safe_float(ticker, 'o'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
symbols = self.symbols if (symbols is None) else symbols
marketIds = []
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.markets[symbol]
if market['active'] and not market['darkpool']:
marketIds.append(market['id'])
request = {
'pair': ','.join(marketIds),
}
response = self.publicGetTicker(self.extend(request, params))
tickers = response['result']
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
if self.in_array(symbol, symbols):
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
darkpool = symbol.find('.d') >= 0
if darkpool:
raise ExchangeError(self.id + ' does not provide a ticker for darkpool symbol ' + symbol)
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
ticker = response['result'][market['id']]
return self.parse_ticker(ticker, market)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
#
# [
# 1591475640,
# "0.02500",
# "0.02500",
# "0.02500",
# "0.02500",
# "0.02500",
# "9.12201000",
# 5
# ]
#
return [
self.safe_timestamp(ohlcv, 0),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 6),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['since'] = int((since - 1) / 1000)
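            # the unified 'since' is in milliseconds while Kraken expects
            # seconds; the (since - 1) above keeps the candle that starts
            # exactly at 'since' from being excluded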
response = self.publicGetOHLC(self.extend(request, params))
#
# {
# "error":[],
# "result":{
# "XETHXXBT":[
# [1591475580,"0.02499","0.02499","0.02499","0.02499","0.00000","0.00000000",0],
# [1591475640,"0.02500","0.02500","0.02500","0.02500","0.02500","9.12201000",5],
# [1591475700,"0.02499","0.02499","0.02499","0.02499","0.02499","1.28681415",2],
# [1591475760,"0.02499","0.02499","0.02499","0.02499","0.02499","0.08800000",1],
# ],
# "last":1591517580
# }
# }
result = self.safe_value(response, 'result', {})
ohlcvs = self.safe_value(result, market['id'], [])
return self.parse_ohlcvs(ohlcvs, market)
def parse_ledger_entry_type(self, type):
types = {
'trade': 'trade',
'withdrawal': 'transaction',
'deposit': 'transaction',
'transfer': 'transfer',
'margin': 'margin',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# 'LTFK7F-N2CUX-PNY4SX': {
# refid: "TSJTGT-DT7WN-GPPQMJ",
# time: 1520102320.555,
# type: "trade",
# aclass: "currency",
# asset: "XETH",
# amount: "0.1087194600",
# fee: "0.0000000000",
# balance: "0.2855851000"
# },
# ...
# }
#
id = self.safe_string(item, 'id')
direction = None
account = None
referenceId = self.safe_string(item, 'refid')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'asset'), currency)
amount = self.safe_float(item, 'amount')
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
time = self.safe_float(item, 'time')
timestamp = None
if time is not None:
timestamp = int(time * 1000)
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/features/api#get-ledgers-info
self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostLedgers(self.extend(request, params))
# { error: [],
# result: {ledger: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N",
# time: 1520103488.314,
# type: "withdrawal",
# aclass: "currency",
# asset: "XETH",
# amount: "-0.2805800000",
# fee: "0.0050000000",
# balance: "0.0000051000" },
result = self.safe_value(response, 'result', {})
ledger = self.safe_value(result, 'ledger', {})
keys = list(ledger.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = ledger[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items, currency, since, limit)
def fetch_ledger_entries_by_ids(self, ids, code=None, params={}):
# https://www.kraken.com/features/api#query-ledgers
self.load_markets()
ids = ','.join(ids)
request = self.extend({
'id': ids,
}, params)
response = self.privatePostQueryLedgers(request)
# { error: [],
# result: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N",
# time: 1520103488.314,
# type: "withdrawal",
# aclass: "currency",
# asset: "XETH",
# amount: "-0.2805800000",
# fee: "0.0050000000",
# balance: "0.0000051000" }}}
result = response['result']
keys = list(result.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = result[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items)
def fetch_ledger_entry(self, id, code=None, params={}):
items = self.fetch_ledger_entries_by_ids([id], code, params)
return items[0]
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# "0.032310", # price
# "4.28169434", # amount
# 1541390792.763, # timestamp
# "s", # sell or buy
# "l", # limit or market
# ""
# ]
#
# fetchOrderTrades(private)
#
# {
# id: 'TIMIRG-WUNNE-RRJ6GT', # injected from outside
# ordertxid: 'OQRPN2-LRHFY-HIFA7D',
# postxid: 'TKH2SE-M7IF5-CFI7LT',
# pair: 'USDCUSDT',
# time: 1586340086.457,
# type: 'sell',
# ordertype: 'market',
# price: '0.99860000',
# cost: '22.16892001',
# fee: '0.04433784',
# vol: '22.20000000',
# margin: '0.00000000',
# misc: ''
# }
#
timestamp = None
side = None
type = None
price = None
amount = None
cost = None
id = None
order = None
fee = None
symbol = None
if isinstance(trade, list):
timestamp = self.safe_timestamp(trade, 2)
side = 'sell' if (trade[3] == 's') else 'buy'
type = 'limit' if (trade[4] == 'l') else 'market'
price = self.safe_float(trade, 0)
amount = self.safe_float(trade, 1)
tradeLength = len(trade)
if tradeLength > 6:
id = self.safe_string(trade, 6) # artificially added as per #1794
elif isinstance(trade, basestring):
id = trade
elif 'ordertxid' in trade:
marketId = self.safe_string(trade, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
# delisted market ids go here
market = self.get_delisted_market_by_id(marketId)
order = trade['ordertxid']
id = self.safe_string_2(trade, 'id', 'postxid')
timestamp = self.safe_timestamp(trade, 'time')
side = self.safe_string(trade, 'type')
type = self.safe_string(trade, 'ordertype')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'vol')
if 'fee' in trade:
currency = None
if market is not None:
currency = market['quote']
fee = {
'cost': self.safe_float(trade, 'fee'),
'currency': currency,
}
if market is not None:
symbol = market['symbol']
if price is not None:
if amount is not None:
cost = price * amount
return {
'id': id,
'order': order,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
id = market['id']
request = {
'pair': id,
}
# https://support.kraken.com/hc/en-us/articles/218198197-How-to-pull-all-trade-data-using-the-Kraken-REST-API
# https://github.com/ccxt/ccxt/issues/5677
if since is not None:
# php does not format it properly
# therefore we use string concatenation here
request['since'] = since * 1e6
request['since'] = str(since) + '000000' # expected to be in nanoseconds
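            # e.g. since = 1591475640123 (milliseconds) is sent to Kraken as
            # the string "1591475640123000000" (nanoseconds)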
# https://github.com/ccxt/ccxt/issues/5698
if limit is not None and limit != 1000:
fetchTradesWarning = self.safe_value(self.options, 'fetchTradesWarning', True)
if fetchTradesWarning:
                raise ExchangeError(self.id + ' fetchTrades() cannot serve ' + str(limit) + " trades without breaking the pagination, see https://github.com/ccxt/ccxt/issues/5698 for more details. Set exchange.options['fetchTradesWarning'] = False to acknowledge this warning and silence it.")
response = self.publicGetTrades(self.extend(request, params))
#
# {
# "error": [],
# "result": {
# "XETHXXBT": [
# ["0.032310","4.28169434",1541390792.763,"s","l",""]
# ],
# "last": "1541439421200678657"
# }
# }
#
result = response['result']
trades = result[id]
# trades is a sorted array: last(most recent trade) goes last
length = len(trades)
if length <= 0:
return []
lastTrade = trades[length - 1]
lastTradeId = self.safe_string(result, 'last')
lastTrade.append(lastTradeId)
return self.parse_trades(trades, market, since, limit)
def fetch_balance(self, params={}):
response = self.privatePostBalance(params)
balances = self.safe_value(response, 'result', {})
result = {'info': balances}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balances, currencyId)
result[code] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'ordertype': type,
'volume': self.amount_to_precision(symbol, amount),
}
clientOrderId = self.safe_string_2(params, 'userref', 'clientOrderId')
query = self.omit(params, ['userref', 'clientOrderId'])
if clientOrderId is not None:
request['userref'] = clientOrderId
priceIsDefined = (price is not None)
marketOrder = (type == 'market')
limitOrder = (type == 'limit')
shouldIncludePrice = limitOrder or (not marketOrder and priceIsDefined)
if shouldIncludePrice:
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostAddOrder(self.extend(request, query))
id = self.safe_value(response['result'], 'txid')
if id is not None:
if isinstance(id, list):
length = len(id)
id = id if (length > 1) else id[0]
return {
'id': id,
'clientOrderId': clientOrderId,
'info': response,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': None,
'filled': None,
'remaining': None,
'status': None,
'fee': None,
'trades': None,
}
def find_market_by_altname_or_id(self, id):
if id in self.marketsByAltname:
return self.marketsByAltname[id]
elif id in self.markets_by_id:
return self.markets_by_id[id]
return None
def get_delisted_market_by_id(self, id):
if id is None:
return id
market = self.safe_value(self.options['delistedMarketsById'], id)
if market is not None:
return market
baseIdStart = 0
baseIdEnd = 3
quoteIdStart = 3
quoteIdEnd = 6
if len(id) == 8:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 8
elif len(id) == 7:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 7
baseId = id[baseIdStart:baseIdEnd]
quoteId = id[quoteIdStart:quoteIdEnd]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
self.options['delistedMarketsById'][id] = market
return market
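    # Example: a delisted 8-character id such as 'XXLMZEUR' is split into
    # baseId 'XXLM' and quoteId 'ZEUR', yielding the unified 'XLM/EUR' symbol.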
def parse_order_status(self, status):
statuses = {
'pending': 'open', # order pending book entry
'open': 'open',
'closed': 'closed',
'canceled': 'canceled',
'expired': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
description = self.safe_value(order, 'descr', {})
side = self.safe_string(description, 'type')
type = self.safe_string(description, 'ordertype')
marketId = self.safe_string(description, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
symbol = None
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
# delisted market ids go here
market = self.get_delisted_market_by_id(marketId)
timestamp = self.safe_timestamp(order, 'opentm')
amount = self.safe_float(order, 'vol')
filled = self.safe_float(order, 'vol_exec')
remaining = amount - filled
fee = None
cost = self.safe_float(order, 'cost')
price = self.safe_float(description, 'price')
if (price is None) or (price == 0):
price = self.safe_float(description, 'price2')
if (price is None) or (price == 0):
price = self.safe_float(order, 'price', price)
average = self.safe_float(order, 'price')
if market is not None:
symbol = market['symbol']
if 'fee' in order:
flags = order['oflags']
feeCost = self.safe_float(order, 'fee')
fee = {
'cost': feeCost,
'rate': None,
}
if flags.find('fciq') >= 0:
fee['currency'] = market['quote']
elif flags.find('fcib') >= 0:
fee['currency'] = market['base']
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
clientOrderId = self.safe_string(order, 'userref')
rawTrades = self.safe_value(order, 'trades')
trades = None
if rawTrades is not None:
trades = self.parse_trades(rawTrades, market, None, None, {'order': id})
return {
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'average': average,
'remaining': remaining,
'fee': fee,
'trades': trades,
}
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
result = []
ids = list(orders.keys())
symbol = None
if market is not None:
symbol = market['symbol']
for i in range(0, len(ids)):
id = ids[i]
order = self.extend({'id': id}, orders[id])
result.append(self.extend(self.parse_order(order, market), params))
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
clientOrderId = self.safe_value_2(params, 'userref', 'clientOrderId')
request = {
'trades': True, # whether or not to include trades in output(optional, default False)
# 'txid': id, # do not comma separate a list of ids - use fetchOrdersByIds instead
# 'userref': 'optional', # restrict results to given user reference id(optional)
}
query = params
if clientOrderId is not None:
request['userref'] = clientOrderId
query = self.omit(params, ['userref', 'clientOrderId'])
else:
request['txid'] = id
response = self.privatePostQueryOrders(self.extend(request, query))
#
# {
# "error":[],
# "result":{
# "OTLAS3-RRHUF-NDWH5A":{
# "refid":null,
# "userref":null,
# "status":"closed",
# "reason":null,
# "opentm":1586822919.3342,
# "closetm":1586822919.365,
# "starttm":0,
# "expiretm":0,
# "descr":{
# "pair":"XBTUSDT",
# "type":"sell",
# "ordertype":"market",
# "price":"0",
# "price2":"0",
# "leverage":"none",
# "order":"sell 0.21804000 XBTUSDT @ market",
# "close":""
# },
# "vol":"0.21804000",
# "vol_exec":"0.21804000",
# "cost":"1493.9",
# "fee":"3.8",
# "price":"6851.5",
# "stopprice":"0.00000",
# "limitprice":"0.00000",
# "misc":"",
# "oflags":"fciq",
# "trades":["TT5UC3-GOIRW-6AZZ6R"]
# }
# }
# }
#
orders = self.safe_value(response, 'result', [])
order = self.parse_order(self.extend({'id': id}, orders[id]))
return self.extend({'info': response}, order)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
orderTrades = self.safe_value(params, 'trades')
tradeIds = []
if orderTrades is None:
raise ArgumentsRequired(self.id + " fetchOrderTrades requires a unified order structure in the params argument or a 'trades' param(an array of trade id strings)")
else:
for i in range(0, len(orderTrades)):
orderTrade = orderTrades[i]
if isinstance(orderTrade, basestring):
tradeIds.append(orderTrade)
else:
tradeIds.append(orderTrade['id'])
self.load_markets()
options = self.safe_value(self.options, 'fetchOrderTrades', {})
batchSize = self.safe_integer(options, 'batchSize', 20)
        numTradeIds = len(tradeIds)
        numBatches = int(numTradeIds / batchSize)
        numBatches = self.sum(numBatches, 1)
result = []
for j in range(0, numBatches):
requestIds = []
for k in range(0, batchSize):
index = self.sum(j * batchSize, k)
if index < numTradeIds:
requestIds.append(tradeIds[index])
request = {
'txid': ','.join(requestIds),
}
response = self.privatePostQueryTrades(request)
#
# {
# error: [],
# result: {
# 'TIMIRG-WUNNE-RRJ6GT': {
# ordertxid: 'OQRPN2-LRHFY-HIFA7D',
# postxid: 'TKH2SE-M7IF5-CFI7LT',
# pair: 'USDCUSDT',
# time: 1586340086.457,
# type: 'sell',
# ordertype: 'market',
# price: '0.99860000',
# cost: '22.16892001',
# fee: '0.04433784',
# vol: '22.20000000',
# margin: '0.00000000',
# misc: ''
# }
# }
# }
#
rawTrades = self.safe_value(response, 'result')
ids = list(rawTrades.keys())
for i in range(0, len(ids)):
rawTrades[ids[i]]['id'] = ids[i]
trades = self.parse_trades(rawTrades, None, since, limit)
tradesFilteredBySymbol = self.filter_by_symbol(trades, symbol)
result = self.array_concat(result, tradesFilteredBySymbol)
return result
def fetch_orders_by_ids(self, ids, symbol=None, params={}):
self.load_markets()
response = self.privatePostQueryOrders(self.extend({
'trades': True, # whether or not to include trades in output(optional, default False)
'txid': ','.join(ids), # comma delimited list of transaction ids to query info about(20 maximum)
}, params))
result = self.safe_value(response, 'result', {})
orders = []
orderIds = list(result.keys())
for i in range(0, len(orderIds)):
id = orderIds[i]
item = result[id]
order = self.parse_order(self.extend({'id': id}, item))
orders.append(order)
return orders
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'type': 'all', # any position, closed position, closing position, no position
# 'trades': False, # whether or not to include trades related to position in output
# 'start': 1234567890, # starting unix timestamp or trade tx id of results(exclusive)
# 'end': 1234567890, # ending unix timestamp or trade tx id of results(inclusive)
# 'ofs' = result offset
}
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostTradesHistory(self.extend(request, params))
#
# {
# "error": [],
# "result": {
# "trades": {
# "GJ3NYQ-XJRTF-THZABF": {
# "ordertxid": "TKH2SE-ZIF5E-CFI7LT",
# "postxid": "OEN3VX-M7IF5-JNBJAM",
# "pair": "XICNXETH",
# "time": 1527213229.4491,
# "type": "sell",
# "ordertype": "limit",
# "price": "0.001612",
# "cost": "0.025792",
# "fee": "0.000026",
# "vol": "16.00000000",
# "margin": "0.000000",
# "misc": ""
# },
# ...
# },
# "count": 9760,
# },
# }
#
trades = response['result']['trades']
ids = list(trades.keys())
for i in range(0, len(ids)):
trades[ids[i]]['id'] = ids[i]
result = self.parse_trades(trades, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = None
try:
response = self.privatePostCancelOrder(self.extend({
'txid': id,
}, params))
except Exception as e:
if self.last_http_response:
if self.last_http_response.find('EOrder:Unknown order') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() error ' + self.last_http_response)
raise e
return response
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostOpenOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['open'], None, since, limit)
if symbol is None:
return orders
return self.filter_by_symbol(orders, symbol)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostClosedOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['closed'], None, since, limit)
if symbol is None:
return orders
return self.filter_by_symbol(orders, symbol)
def fetch_deposit_methods(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privatePostDepositMethods(self.extend(request, params))
return response['result']
def parse_transaction_status(self, status):
# IFEX transaction states
statuses = {
'Initial': 'pending',
'Pending': 'pending',
'Success': 'ok',
'Settled': 'pending',
'Failure': 'failed',
'Partial': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {method: "Ether(Hex)",
# aclass: "currency",
# asset: "XETH",
# refid: "Q2CANKL-LBFVEE-U4Y2WQ",
# txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…",
# info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290",
# amount: "7.9999257900",
# fee: "0.0000000000",
# time: 1529223212,
# status: "Success" }
#
# fetchWithdrawals
#
# {method: "Ether",
# aclass: "currency",
# asset: "XETH",
# refid: "A2BF34S-O7LBNQ-UE4Y4O",
# txid: "0x288b83c6b0904d8400ef44e1c9e2187b5c8f7ea3d838222d53f701a15b5c274d",
# info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0",
# amount: "9.9950000000",
# fee: "0.0050000000",
# time: 1530481750,
# status: "Success" }
#
id = self.safe_string(transaction, 'refid')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_timestamp(transaction, 'time')
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
address = self.safe_string(transaction, 'info')
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type') # injected from the outside
feeCost = self.safe_float(transaction, 'fee')
if feeCost is None:
if type == 'deposit':
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/en-us/help/api#deposit-status
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privatePostDepositStatus(self.extend(request, params))
#
# { error: [],
# result: [{method: "Ether(Hex)",
# aclass: "currency",
# asset: "XETH",
# refid: "Q2CANKL-LBFVEE-U4Y2WQ",
# txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…",
# info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290",
# amount: "7.9999257900",
# fee: "0.0000000000",
# time: 1529223212,
# status: "Success" }]}
#
return self.parse_transactions_by_type('deposit', response['result'], code, since, limit)
def fetch_time(self, params={}):
# https://www.kraken.com/en-us/features/api#get-server-time
response = self.publicGetTime(params)
#
# {
# "error": [],
# "result": {
# "unixtime": 1591502873,
# "rfc1123": "Sun, 7 Jun 20 04:07:53 +0000"
# }
# }
#
result = self.safe_value(response, 'result', {})
return self.safe_timestamp(result, 'unixtime')
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/en-us/help/api#withdraw-status
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privatePostWithdrawStatus(self.extend(request, params))
#
# { error: [],
# result: [{method: "Ether",
# aclass: "currency",
# asset: "XETH",
# refid: "A2BF34S-O7LBNQ-UE4Y4O",
# txid: "0x298c83c7b0904d8400ef43e1c9e2287b518f7ea3d838822d53f704a1565c274d",
# info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0",
# amount: "9.9950000000",
# fee: "0.0050000000",
# time: 1530481750,
# status: "Success" }]}
#
return self.parse_transactions_by_type('withdrawal', response['result'], code, since, limit)
def create_deposit_address(self, code, params={}):
request = {
'new': 'true',
}
response = self.fetch_deposit_address(code, self.extend(request, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'info': response,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
method = self.safe_string(params, 'method')
if method is None:
if self.options['cacheDepositMethodsOnFetchDepositAddress']:
# cache depositMethods
if not (code in self.options['depositMethods']):
self.options['depositMethods'][code] = self.fetch_deposit_methods(code)
method = self.options['depositMethods'][code][0]['method']
else:
raise ArgumentsRequired(self.id + ' fetchDepositAddress() requires an extra `method` parameter. Use fetchDepositMethods("' + code + '") to get a list of available deposit methods or enable the exchange property .options["cacheDepositMethodsOnFetchDepositAddress"] = True')
request = {
'asset': currency['id'],
'method': method,
}
response = self.privatePostDepositAddresses(self.extend(request, params)) # overwrite methods
result = response['result']
numResults = len(result)
if numResults < 1:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses')
address = self.safe_string(result[0], 'address')
tag = self.safe_string_2(result[0], 'tag', 'memo')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
if 'key' in params:
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': amount,
# 'address': address, # they don't allow withdrawals to direct addresses
}
response = self.privatePostWithdraw(self.extend(request, params))
return {
'info': response,
'id': response['result'],
}
raise ExchangeError(self.id + " withdraw requires a 'key' parameter(withdrawal key name, as set up on your account)")
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.version + '/' + api + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
elif api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
body = self.urlencode(self.extend({'nonce': nonce}, params))
auth = self.encode(nonce + body)
hash = self.hash(auth, 'sha256', 'binary')
binary = self.encode(url)
binhash = self.binary_concat(binary, hash)
secret = base64.b64decode(self.secret)
signature = self.hmac(binhash, secret, hashlib.sha512, 'base64')
headers = {
'API-Key': self.apiKey,
'API-Sign': self.decode(signature),
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
url = '/' + path
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
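    # Private calls above are signed per Kraken's documented scheme:
    #   API-Sign = base64(HMAC-SHA512(uri_path + SHA256(nonce + urlencoded body),
    #                                 base64decode(secret)))
    # i.e. the binary SHA256 digest of the nonce concatenated with the POST
    # body is appended to the request path and HMAC-signed with the decoded
    # API secret.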
def nonce(self):
return self.milliseconds()
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 520:
raise ExchangeNotAvailable(self.id + ' ' + str(code) + ' ' + reason)
        # todo: rewrite this for "broad" exception matching
if body.find('Invalid order') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body.find('Invalid nonce') >= 0:
raise InvalidNonce(self.id + ' ' + body)
if body.find('Insufficient funds') >= 0:
raise InsufficientFunds(self.id + ' ' + body)
if body.find('Cancel pending') >= 0:
raise CancelPending(self.id + ' ' + body)
if body.find('Invalid arguments:volume') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body[0] == '{':
if not isinstance(response, basestring):
if 'error' in response:
numErrors = len(response['error'])
if numErrors:
message = self.id + ' ' + body
for i in range(0, len(response['error'])):
error = response['error'][i]
self.throw_exactly_matched_exception(self.exceptions, error, message)
raise ExchangeError(message)
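
# A minimal usage sketch (illustrative; the credentials are placeholders):
#
#     import ccxt
#     kraken = ccxt.kraken({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#     print(kraken.fetch_ticker('BTC/EUR'))   # public GET, no signature
#     print(kraken.fetch_balance())           # private POST, signed in sign()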
| 40.439698 | 288 | 0.466511 |
'EAPI:Invalid key': AuthenticationError,
'EFunding:Unknown withdraw key': ExchangeError,
'EFunding:Invalid amount': InsufficientFunds,
'EService:Unavailable': ExchangeNotAvailable,
'EDatabase:Internal error': ExchangeNotAvailable,
'EService:Busy': ExchangeNotAvailable,
'EQuery:Unknown asset': ExchangeError,
'EAPI:Rate limit exceeded': DDoSProtection,
'EOrder:Rate limit exceeded': DDoSProtection,
'EGeneral:Internal error': ExchangeNotAvailable,
'EGeneral:Temporary lockout': DDoSProtection,
'EGeneral:Permission denied': PermissionDenied,
},
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
def fetch_min_order_amounts(self, params={}):
response = self.zendeskGet205893708(params)
article = self.safe_value(response, 'article')
html = self.safe_string(article, 'body')
parts = html.split('<td class="wysiwyg-text-align-right">')
numParts = len(parts)
if numParts < 3:
raise NotSupported(self.id + ' fetchMinOrderAmounts HTML page markup has changed: https://kraken.zendesk.com/api/v2/help_center/en-us/articles/205893708')
result = {}
for i in range(2, len(parts)):
part = parts[i]
chunks = part.split('</td>')
amountAndCode = chunks[0]
if amountAndCode != 'To Be Announced':
pieces = amountAndCode.split(' ')
numPieces = len(pieces)
if numPieces == 2:
amount = float(pieces[0])
code = self.safe_currency_code(pieces[1])
result[code] = amount
return result
def fetch_markets(self, params={}):
response = self.publicGetAssetPairs(params)
fetchMinOrderAmounts = self.safe_value(self.options, 'fetchMinOrderAmounts', False)
limits = {}
if fetchMinOrderAmounts:
limits = self.fetch_min_order_amounts()
keys = list(response['result'].keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = response['result'][id]
baseId = market['base']
quoteId = market['quote']
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
darkpool = id.find('.d') >= 0
symbol = market['altname'] if darkpool else (base + '/' + quote)
maker = None
if 'fees_maker' in market:
maker = float(market['fees_maker'][0][1]) / 100
precision = {
'amount': market['lot_decimals'],
'price': market['pair_decimals'],
}
minAmount = math.pow(10, -precision['amount'])
if base in limits:
minAmount = limits[base]
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'darkpool': darkpool,
'info': market,
'altname': market['altname'],
'maker': maker,
'taker': float(market['fees'][0][1]) / 100,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
})
result = self.append_inactive_markets(result)
self.marketsByAltname = self.index_by(result, 'altname')
return result
def safe_currency_code(self, currencyId, currency=None):
if len(currencyId) > 3:
if (currencyId.find('X') == 0) or (currencyId.find('Z') == 0):
currencyId = currencyId[1:]
return super(kraken, self).safe_currency_code(currencyId, currency)
def append_inactive_markets(self, result):
precision = {'amount': 8, 'price': 8}
costLimits = {'min': 0, 'max': None}
priceLimits = {'min': math.pow(10, -precision['price']), 'max': None}
amountLimits = {'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount'])}
limits = {'amount': amountLimits, 'price': priceLimits, 'cost': costLimits}
defaults = {
'darkpool': False,
'info': None,
'maker': None,
'taker': None,
'active': False,
'precision': precision,
'limits': limits,
}
markets = [
]
for i in range(0, len(markets)):
result.append(self.extend(defaults, markets[i]))
return result
def fetch_currencies(self, params={}):
response = self.publicGetAssets(params)
currencies = self.safe_value(response, 'result')
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
code = self.safe_currency_code(self.safe_string(currency, 'altname'))
precision = self.safe_integer(currency, 'decimals')
active = not self.in_array(code, self.options['inactiveCurrencies'])
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
def fetch_trading_fees(self, params={}):
self.load_markets()
self.check_required_credentials()
response = self.privatePostTradeVolume(params)
tradedVolume = self.safe_float(response['result'], 'volume')
tiers = self.fees['trading']['tiers']
taker = tiers['taker'][1]
maker = tiers['maker'][1]
for i in range(0, len(tiers['taker'])):
if tradedVolume >= tiers['taker'][i][0]:
taker = tiers['taker'][i][1]
for i in range(0, len(tiers['maker'])):
if tradedVolume >= tiers['maker'][i][0]:
maker = tiers['maker'][i][1]
return {
'info': response,
'maker': maker,
'taker': taker,
}
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
price = self.safe_float(bidask, priceKey)
amount = self.safe_float(bidask, amountKey)
timestamp = self.safe_integer(bidask, 2)
return [price, amount, timestamp]
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if market['darkpool']:
raise ExchangeError(self.id + ' does not provide an order book for darkpool symbol ' + symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['count'] = limit
response = self.publicGetDepth(self.extend(request, params))
result = self.safe_value(response, 'result', {})
orderbook = self.safe_value(result, market['id'])
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
baseVolume = float(ticker['v'][1])
vwap = float(ticker['p'][1])
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = float(ticker['c'][0])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['h'][1]),
'low': float(ticker['l'][1]),
'bid': float(ticker['b'][0]),
'bidVolume': None,
'ask': float(ticker['a'][0]),
'askVolume': None,
'vwap': vwap,
'open': self.safe_float(ticker, 'o'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
symbols = self.symbols if (symbols is None) else symbols
marketIds = []
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.markets[symbol]
if market['active'] and not market['darkpool']:
marketIds.append(market['id'])
request = {
'pair': ','.join(marketIds),
}
response = self.publicGetTicker(self.extend(request, params))
tickers = response['result']
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
if self.in_array(symbol, symbols):
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
darkpool = symbol.find('.d') >= 0
if darkpool:
raise ExchangeError(self.id + ' does not provide a ticker for darkpool symbol ' + symbol)
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
ticker = response['result'][market['id']]
return self.parse_ticker(ticker, market)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
self.safe_timestamp(ohlcv, 0),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 6),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['since'] = int((since - 1) / 1000)
response = self.publicGetOHLC(self.extend(request, params))
result = self.safe_value(response, 'result', {})
ohlcvs = self.safe_value(result, market['id'], [])
return self.parse_ohlcvs(ohlcvs, market)
def parse_ledger_entry_type(self, type):
types = {
'trade': 'trade',
'withdrawal': 'transaction',
'deposit': 'transaction',
'transfer': 'transfer',
'margin': 'margin',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
id = self.safe_string(item, 'id')
direction = None
account = None
referenceId = self.safe_string(item, 'refid')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'asset'), currency)
amount = self.safe_float(item, 'amount')
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
time = self.safe_float(item, 'time')
timestamp = None
if time is not None:
timestamp = int(time * 1000)
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
        self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostLedgers(self.extend(request, params))
result = self.safe_value(response, 'result', {})
ledger = self.safe_value(result, 'ledger', {})
keys = list(ledger.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = ledger[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items, currency, since, limit)
def fetch_ledger_entries_by_ids(self, ids, code=None, params={}):
        self.load_markets()
ids = ','.join(ids)
request = self.extend({
'id': ids,
}, params)
response = self.privatePostQueryLedgers(request)
result = response['result']
keys = list(result.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = result[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items)
def fetch_ledger_entry(self, id, code=None, params={}):
items = self.fetch_ledger_entries_by_ids([id], code, params)
return items[0]
def parse_trade(self, trade, market=None):
timestamp = None
side = None
type = None
price = None
amount = None
cost = None
id = None
order = None
fee = None
symbol = None
if isinstance(trade, list):
timestamp = self.safe_timestamp(trade, 2)
side = 'sell' if (trade[3] == 's') else 'buy'
type = 'limit' if (trade[4] == 'l') else 'market'
price = self.safe_float(trade, 0)
amount = self.safe_float(trade, 1)
tradeLength = len(trade)
if tradeLength > 6:
                id = self.safe_string(trade, 6)
        elif isinstance(trade, basestring):
id = trade
elif 'ordertxid' in trade:
marketId = self.safe_string(trade, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
market = self.get_delisted_market_by_id(marketId)
order = trade['ordertxid']
id = self.safe_string_2(trade, 'id', 'postxid')
timestamp = self.safe_timestamp(trade, 'time')
side = self.safe_string(trade, 'type')
type = self.safe_string(trade, 'ordertype')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'vol')
if 'fee' in trade:
currency = None
if market is not None:
currency = market['quote']
fee = {
'cost': self.safe_float(trade, 'fee'),
'currency': currency,
}
if market is not None:
symbol = market['symbol']
if price is not None:
if amount is not None:
cost = price * amount
return {
'id': id,
'order': order,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
id = market['id']
request = {
'pair': id,
}
if since is not None:
request['since'] = since * 1e6
request['since'] = str(since) + '000000'
if limit is not None and limit != 1000:
fetchTradesWarning = self.safe_value(self.options, 'fetchTradesWarning', True)
if fetchTradesWarning:
raise ExchangeError(self.id + ' fetchTrades() cannot serve ' + str(limit) + " trades without breaking the pagination, see https://github.com/ccxt/ccxt/issues/5698 for more details. Set exchange.options['fetchTradesWarning'] to acknowledge self warning and silence it.")
response = self.publicGetTrades(self.extend(request, params))
result = response['result']
trades = result[id]
length = len(trades)
if length <= 0:
return []
lastTrade = trades[length - 1]
lastTradeId = self.safe_string(result, 'last')
lastTrade.append(lastTradeId)
return self.parse_trades(trades, market, since, limit)
def fetch_balance(self, params={}):
response = self.privatePostBalance(params)
balances = self.safe_value(response, 'result', {})
result = {'info': balances}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balances, currencyId)
result[code] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'ordertype': type,
'volume': self.amount_to_precision(symbol, amount),
}
clientOrderId = self.safe_string_2(params, 'userref', 'clientOrderId')
query = self.omit(params, ['userref', 'clientOrderId'])
if clientOrderId is not None:
request['userref'] = clientOrderId
priceIsDefined = (price is not None)
marketOrder = (type == 'market')
limitOrder = (type == 'limit')
shouldIncludePrice = limitOrder or (not marketOrder and priceIsDefined)
if shouldIncludePrice:
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostAddOrder(self.extend(request, query))
id = self.safe_value(response['result'], 'txid')
if id is not None:
if isinstance(id, list):
length = len(id)
id = id if (length > 1) else id[0]
return {
'id': id,
'clientOrderId': clientOrderId,
'info': response,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': None,
'filled': None,
'remaining': None,
'status': None,
'fee': None,
'trades': None,
}
def find_market_by_altname_or_id(self, id):
if id in self.marketsByAltname:
return self.marketsByAltname[id]
elif id in self.markets_by_id:
return self.markets_by_id[id]
return None
def get_delisted_market_by_id(self, id):
if id is None:
return id
market = self.safe_value(self.options['delistedMarketsById'], id)
if market is not None:
return market
baseIdStart = 0
baseIdEnd = 3
quoteIdStart = 3
quoteIdEnd = 6
if len(id) == 8:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 8
elif len(id) == 7:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 7
baseId = id[baseIdStart:baseIdEnd]
quoteId = id[quoteIdStart:quoteIdEnd]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
self.options['delistedMarketsById'][id] = market
return market
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'open': 'open',
'closed': 'closed',
'canceled': 'canceled',
'expired': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
description = self.safe_value(order, 'descr', {})
side = self.safe_string(description, 'type')
type = self.safe_string(description, 'ordertype')
marketId = self.safe_string(description, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
symbol = None
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
market = self.get_delisted_market_by_id(marketId)
timestamp = self.safe_timestamp(order, 'opentm')
amount = self.safe_float(order, 'vol')
filled = self.safe_float(order, 'vol_exec')
remaining = amount - filled
fee = None
cost = self.safe_float(order, 'cost')
price = self.safe_float(description, 'price')
if (price is None) or (price == 0):
price = self.safe_float(description, 'price2')
if (price is None) or (price == 0):
price = self.safe_float(order, 'price', price)
average = self.safe_float(order, 'price')
if market is not None:
symbol = market['symbol']
if 'fee' in order:
flags = order['oflags']
feeCost = self.safe_float(order, 'fee')
fee = {
'cost': feeCost,
'rate': None,
}
if flags.find('fciq') >= 0:
fee['currency'] = market['quote']
elif flags.find('fcib') >= 0:
fee['currency'] = market['base']
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
clientOrderId = self.safe_string(order, 'userref')
rawTrades = self.safe_value(order, 'trades')
trades = None
if rawTrades is not None:
trades = self.parse_trades(rawTrades, market, None, None, {'order': id})
return {
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'average': average,
'remaining': remaining,
'fee': fee,
'trades': trades,
}
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
result = []
ids = list(orders.keys())
symbol = None
if market is not None:
symbol = market['symbol']
for i in range(0, len(ids)):
id = ids[i]
order = self.extend({'id': id}, orders[id])
result.append(self.extend(self.parse_order(order, market), params))
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
clientOrderId = self.safe_value_2(params, 'userref', 'clientOrderId')
request = {
'trades': True,
        }
        query = params
        if clientOrderId is not None:
            request['userref'] = clientOrderId
query = self.omit(params, ['userref', 'clientOrderId'])
else:
request['txid'] = id
response = self.privatePostQueryOrders(self.extend(request, query))
orders = self.safe_value(response, 'result', [])
order = self.parse_order(self.extend({'id': id}, orders[id]))
return self.extend({'info': response}, order)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
orderTrades = self.safe_value(params, 'trades')
tradeIds = []
if orderTrades is None:
raise ArgumentsRequired(self.id + " fetchOrderTrades requires a unified order structure in the params argument or a 'trades' param(an array of trade id strings)")
else:
for i in range(0, len(orderTrades)):
orderTrade = orderTrades[i]
if isinstance(orderTrade, basestring):
tradeIds.append(orderTrade)
else:
tradeIds.append(orderTrade['id'])
self.load_markets()
options = self.safe_value(self.options, 'fetchOrderTrades', {})
batchSize = self.safe_integer(options, 'batchSize', 20)
        numTradeIds = len(tradeIds)
        numBatches = int(numTradeIds / batchSize)
        numBatches = self.sum(numBatches, 1)
result = []
for j in range(0, numBatches):
requestIds = []
for k in range(0, batchSize):
index = self.sum(j * batchSize, k)
if index < numTradeIds:
requestIds.append(tradeIds[index])
request = {
'txid': ','.join(requestIds),
}
response = self.privatePostQueryTrades(request)
rawTrades = self.safe_value(response, 'result')
ids = list(rawTrades.keys())
for i in range(0, len(ids)):
rawTrades[ids[i]]['id'] = ids[i]
trades = self.parse_trades(rawTrades, None, since, limit)
tradesFilteredBySymbol = self.filter_by_symbol(trades, symbol)
result = self.array_concat(result, tradesFilteredBySymbol)
return result
def fetch_orders_by_ids(self, ids, symbol=None, params={}):
self.load_markets()
response = self.privatePostQueryOrders(self.extend({
'trades': True,
'txid': ','.join(ids),
}, params))
result = self.safe_value(response, 'result', {})
orders = []
orderIds = list(result.keys())
for i in range(0, len(orderIds)):
id = orderIds[i]
item = result[id]
order = self.parse_order(self.extend({'id': id}, item))
orders.append(order)
return orders
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
        request = {}
        if since is not None:
            request['start'] = int(since / 1000)
        response = self.privatePostTradesHistory(self.extend(request, params))
        trades = response['result']['trades']
ids = list(trades.keys())
for i in range(0, len(ids)):
trades[ids[i]]['id'] = ids[i]
result = self.parse_trades(trades, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = None
try:
response = self.privatePostCancelOrder(self.extend({
'txid': id,
}, params))
except Exception as e:
if self.last_http_response:
if self.last_http_response.find('EOrder:Unknown order') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() error ' + self.last_http_response)
raise e
return response
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostOpenOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['open'], None, since, limit)
if symbol is None:
return orders
return self.filter_by_symbol(orders, symbol)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
response = self.privatePostClosedOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['closed'], None, since, limit)
if symbol is None:
return orders
return self.filter_by_symbol(orders, symbol)
def fetch_deposit_methods(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privatePostDepositMethods(self.extend(request, params))
return response['result']
def parse_transaction_status(self, status):
statuses = {
'Initial': 'pending',
'Pending': 'pending',
'Success': 'ok',
'Settled': 'pending',
'Failure': 'failed',
'Partial': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'refid')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_timestamp(transaction, 'time')
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
address = self.safe_string(transaction, 'info')
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type')
feeCost = self.safe_float(transaction, 'fee')
if feeCost is None:
if type == 'deposit':
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privatePostDepositStatus(self.extend(request, params))
return self.parse_transactions_by_type('deposit', response['result'], code, since, limit)
def fetch_time(self, params={}):
        response = self.publicGetTime(params)
result = self.safe_value(response, 'result', {})
return self.safe_timestamp(result, 'unixtime')
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = self.privatePostWithdrawStatus(self.extend(request, params))
return self.parse_transactions_by_type('withdrawal', response['result'], code, since, limit)
def create_deposit_address(self, code, params={}):
request = {
'new': 'true',
}
response = self.fetch_deposit_address(code, self.extend(request, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'info': response,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
method = self.safe_string(params, 'method')
if method is None:
if self.options['cacheDepositMethodsOnFetchDepositAddress']:
if not (code in self.options['depositMethods']):
self.options['depositMethods'][code] = self.fetch_deposit_methods(code)
method = self.options['depositMethods'][code][0]['method']
else:
raise ArgumentsRequired(self.id + ' fetchDepositAddress() requires an extra `method` parameter. Use fetchDepositMethods("' + code + '") to get a list of available deposit methods or enable the exchange property .options["cacheDepositMethodsOnFetchDepositAddress"] = True')
request = {
'asset': currency['id'],
'method': method,
}
response = self.privatePostDepositAddresses(self.extend(request, params))
result = response['result']
numResults = len(result)
if numResults < 1:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses')
address = self.safe_string(result[0], 'address')
tag = self.safe_string_2(result[0], 'tag', 'memo')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
if 'key' in params:
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': amount,
            }
            response = self.privatePostWithdraw(self.extend(request, params))
return {
'info': response,
'id': response['result'],
}
raise ExchangeError(self.id + " withdraw requires a 'key' parameter(withdrawal key name, as set up on your account)")
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.version + '/' + api + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
elif api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
body = self.urlencode(self.extend({'nonce': nonce}, params))
auth = self.encode(nonce + body)
hash = self.hash(auth, 'sha256', 'binary')
binary = self.encode(url)
binhash = self.binary_concat(binary, hash)
secret = base64.b64decode(self.secret)
signature = self.hmac(binhash, secret, hashlib.sha512, 'base64')
headers = {
'API-Key': self.apiKey,
'API-Sign': self.decode(signature),
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
url = '/' + path
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def nonce(self):
return self.milliseconds()
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 520:
raise ExchangeNotAvailable(self.id + ' ' + str(code) + ' ' + reason)
# todo: rewrite self for "broad" exceptions matching
if body.find('Invalid order') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body.find('Invalid nonce') >= 0:
raise InvalidNonce(self.id + ' ' + body)
if body.find('Insufficient funds') >= 0:
raise InsufficientFunds(self.id + ' ' + body)
if body.find('Cancel pending') >= 0:
raise CancelPending(self.id + ' ' + body)
if body.find('Invalid arguments:volume') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body[0] == '{':
if not isinstance(response, basestring):
if 'error' in response:
numErrors = len(response['error'])
if numErrors:
message = self.id + ' ' + body
for i in range(0, len(response['error'])):
error = response['error'][i]
self.throw_exactly_matched_exception(self.exceptions, error, message)
raise ExchangeError(message)
| true | true |
f7fd96bb420b56fde26a6d4a501e4af400790593 | 1,390 | py | Python | 01_versoes_pacotes_pip/async_api_calls/version_checker/main.py | fernandosavio/hackatombo | 9ef412b976c34c3cbd075b512a3bfcafd86b4d98 | [
"MIT"
] | null | null | null | 01_versoes_pacotes_pip/async_api_calls/version_checker/main.py | fernandosavio/hackatombo | 9ef412b976c34c3cbd075b512a3bfcafd86b4d98 | [
"MIT"
] | null | null | null | 01_versoes_pacotes_pip/async_api_calls/version_checker/main.py | fernandosavio/hackatombo | 9ef412b976c34c3cbd075b512a3bfcafd86b4d98 | [
"MIT"
] | null | null | null | import argparse
from . import __version__
from .parser import Package
def get_options():
"""
    Parses the arguments received from the command line.
Docs: https://docs.python.org/3.9/library/argparse.html
Tutorial: https://docs.python.org/3.9/howto/argparse.html
"""
parser = argparse.ArgumentParser(
prog='version_checker',
description='''
            Reads package versions from a requirements-like text file and checks on
            PyPI if there is a newer version for each package.
            Prints the results as JSON to stdout.
''',
epilog='''
            This script uses the "Version matching" specifier (`==`) to recognize
            the package version, so packages pinned with specifiers such as `!=`,
            `~=`, `<=`, `>=`, `>` and `<` are treated as up to date.
''',
)
parser.add_argument(
'-f', '--file',
help="requirements file path (defaults to stdin).",
default='-',
type=argparse.FileType('r', encoding='utf8'),
)
parser.add_argument(
'-V', '--version',
action='version',
version=f'%(prog)s {__version__}',
)
return parser.parse_args()
def main():
opts = get_options()
with opts.file as file:
for package in Package.from_file(file):
print(package)
if __name__ == '__main__':
main()
| 27.254902 | 86 | 0.585612 | import argparse
from . import __version__
from .parser import Package
def get_options():
parser = argparse.ArgumentParser(
prog='version_checker',
description='''
            Reads package versions from a requirements-like text file and checks on
            PyPI if there is a newer version for each package.
            Prints the results as JSON to stdout.
''',
epilog='''
            This script uses the "Version matching" specifier (`==`) to recognize
            the package version, so packages pinned with specifiers such as `!=`,
            `~=`, `<=`, `>=`, `>` and `<` are treated as up to date.
''',
)
parser.add_argument(
'-f', '--file',
help="requirements file path (defaults to stdin).",
default='-',
type=argparse.FileType('r', encoding='utf8'),
)
parser.add_argument(
'-V', '--version',
action='version',
version=f'%(prog)s {__version__}',
)
return parser.parse_args()
def main():
opts = get_options()
with opts.file as file:
for package in Package.from_file(file):
print(package)
if __name__ == '__main__':
main()
| true | true |
f7fd96d979ac2ea3fa118ca31f4b62a0b3f2438f | 3,832 | py | Python | progentrl/gen_rl.py | icyray/proGENTRL | c48305c3411ecb604c4f26f5e6b62f285e42e696 | [
"MIT"
] | 1 | 2022-01-07T13:39:10.000Z | 2022-01-07T13:39:10.000Z | progentrl/gen_rl.py | christofer-f/proGENTRL | c48305c3411ecb604c4f26f5e6b62f285e42e696 | [
"MIT"
] | null | null | null | progentrl/gen_rl.py | christofer-f/proGENTRL | c48305c3411ecb604c4f26f5e6b62f285e42e696 | [
"MIT"
] | 1 | 2021-08-30T05:47:29.000Z | 2021-08-30T05:47:29.000Z | import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from math import pi, log
from .lp import LP
from .utils import save, load
import joblib
from collections import OrderedDict
from moses.metrics.utils import get_mol
import pytorch_lightning as pl
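# One-element Dataset: PyTorch Lightning expects a train dataloader, but the RL
# batch here is generated inside training_step, so a single dummy item appears
# to exist only to drive one optimization step per epoch.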
class oneDataSet(Dataset):
def __init__(self):
self.one_elem = [1]
def __len__(self):
return len(self.one_elem)
def __getitem__(self, idx):
return self.one_elem[0]
class GENTRL_RL(pl.LightningModule):
'''
    GENTRL model (reinforcement-learning fine-tuning stage)
'''
def __init__(self,
reward_fn,
enc,
dec,
latent_descr,
feature_descr,
rl_batch_size = 200,
tt_int=40,
tt_type='usual',
beta=0.01,
gamma=0.1,
load_model=None
):
super().__init__()
self.reward_fn = reward_fn
self.rl_batch_size = rl_batch_size
self.num_latent = len(latent_descr)
self.num_features = len(feature_descr)
self.latent_descr = latent_descr
self.feature_descr = feature_descr
self.tt_int = tt_int
self.tt_type = tt_type
self.enc = enc
self.dec = dec
self.beta = beta
self.gamma = gamma
self.lp = LP(distr_descr=self.latent_descr + self.feature_descr,
tt_int=self.tt_int, tt_type=self.tt_type)
if load_model is not None:
self = load(self, load_model)
def forward(self, num_samples):
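        # Sample latent codes from the learned prior (self.lp) and decode them
        # into SMILES strings; the 50 is presumably the maximum token length.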
z = self.lp.sample(num_samples, 50 * ['s'] + ['m'])
smiles = self.dec.sample(50, z, argmax=False)
return smiles
def training_step(self, batch, batch_idx):
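        # REINFORCE-style policy-gradient step: ~70% of the batch is sampled from
        # the learned latent prior (exploitation) and ~30% from a widened Gaussian
        # around its statistics (exploration); decoded SMILES are scored with
        # reward_fn, and the mean reward is subtracted as a variance-reducing baseline.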
exploit_size = int(self.rl_batch_size * (1 - 0.3))
exploit_z = self.lp.sample(exploit_size, 50 * ['s'] + ['m'])
z_means = exploit_z.mean(dim=0)
z_stds = exploit_z.std(dim=0)
expl_size = int(self.rl_batch_size * 0.3)
expl_z = torch.randn(expl_size, exploit_z.shape[1]).to(self.device)
expl_z = 2 * expl_z * z_stds[None, :]
expl_z += z_means[None, :]
z = torch.cat([exploit_z, expl_z])
smiles = self.dec.sample(50, z, argmax=False)
zc = torch.zeros(z.shape[0], 1).to(z.device)
conc_zy = torch.cat([z, zc], dim=1)
log_probs = self.lp.log_prob(conc_zy, marg=50 * [False] + [True])
log_probs += self.dec(smiles, z)
r_list = [self.reward_fn(s) for s in smiles]
rewards = torch.tensor(r_list).float().to(exploit_z.device)
rewards_bl = rewards - rewards.mean()
loss = -(log_probs * rewards_bl).mean()
valid_sm = [s for s in smiles if get_mol(s) is not None]
cur_stats = {
'mean_reward': torch.tensor(sum(r_list) / len(smiles)),
'valid_perc': torch.tensor(len(valid_sm) / len(smiles))
}
output_dict = OrderedDict({
'loss': loss,
'log': cur_stats,
'progress_bar': cur_stats
})
return output_dict
def configure_optimizers(self):
lr_lp=1e-5
lr_dec=1e-6
optimizer = optim.Adam([
{'params': self.lp.parameters()},
{'params': self.dec.latent_fc.parameters(), 'lr': lr_dec}
], lr=lr_lp)
# scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
return [optimizer]#, [scheduler]
def train_dataloader(self):
oneElementDataSet = oneDataSet()
oneElementDataLoader = DataLoader(oneElementDataSet, batch_size=1)
return oneElementDataLoader | 29.705426 | 75 | 0.566545 | import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from math import pi, log
from .lp import LP
from .utils import save, load
import joblib
from collections import OrderedDict
from moses.metrics.utils import get_mol
import pytorch_lightning as pl
class oneDataSet(Dataset):
def __init__(self):
self.one_elem = [1]
def __len__(self):
return len(self.one_elem)
def __getitem__(self, idx):
return self.one_elem[0]
class GENTRL_RL(pl.LightningModule):
def __init__(self,
reward_fn,
enc,
dec,
latent_descr,
feature_descr,
rl_batch_size = 200,
tt_int=40,
tt_type='usual',
beta=0.01,
gamma=0.1,
load_model=None
):
super().__init__()
self.reward_fn = reward_fn
self.rl_batch_size = rl_batch_size
self.num_latent = len(latent_descr)
self.num_features = len(feature_descr)
self.latent_descr = latent_descr
self.feature_descr = feature_descr
self.tt_int = tt_int
self.tt_type = tt_type
self.enc = enc
self.dec = dec
self.beta = beta
self.gamma = gamma
self.lp = LP(distr_descr=self.latent_descr + self.feature_descr,
tt_int=self.tt_int, tt_type=self.tt_type)
if load_model is not None:
self = load(self, load_model)
def forward(self, num_samples):
z = self.lp.sample(num_samples, 50 * ['s'] + ['m'])
smiles = self.dec.sample(50, z, argmax=False)
return smiles
def training_step(self, batch, batch_idx):
exploit_size = int(self.rl_batch_size * (1 - 0.3))
exploit_z = self.lp.sample(exploit_size, 50 * ['s'] + ['m'])
z_means = exploit_z.mean(dim=0)
z_stds = exploit_z.std(dim=0)
expl_size = int(self.rl_batch_size * 0.3)
expl_z = torch.randn(expl_size, exploit_z.shape[1]).to(self.device)
expl_z = 2 * expl_z * z_stds[None, :]
expl_z += z_means[None, :]
z = torch.cat([exploit_z, expl_z])
smiles = self.dec.sample(50, z, argmax=False)
zc = torch.zeros(z.shape[0], 1).to(z.device)
conc_zy = torch.cat([z, zc], dim=1)
log_probs = self.lp.log_prob(conc_zy, marg=50 * [False] + [True])
log_probs += self.dec(smiles, z)
r_list = [self.reward_fn(s) for s in smiles]
rewards = torch.tensor(r_list).float().to(exploit_z.device)
rewards_bl = rewards - rewards.mean()
loss = -(log_probs * rewards_bl).mean()
valid_sm = [s for s in smiles if get_mol(s) is not None]
cur_stats = {
'mean_reward': torch.tensor(sum(r_list) / len(smiles)),
'valid_perc': torch.tensor(len(valid_sm) / len(smiles))
}
output_dict = OrderedDict({
'loss': loss,
'log': cur_stats,
'progress_bar': cur_stats
})
return output_dict
def configure_optimizers(self):
lr_lp=1e-5
lr_dec=1e-6
optimizer = optim.Adam([
{'params': self.lp.parameters()},
{'params': self.dec.latent_fc.parameters(), 'lr': lr_dec}
], lr=lr_lp)
return [optimizer]
def train_dataloader(self):
oneElementDataSet = oneDataSet()
oneElementDataLoader = DataLoader(oneElementDataSet, batch_size=1)
return oneElementDataLoader | true | true |
f7fd9746661308c0bd6cfaea0565773212bb6d9c | 41,244 | py | Python | MicroPythonPkg/MicroPythonDxe/Lib/Uefi/smbios.py | curtisrlee/edk2-staging | 26f310d39e8dcb4fdfb6cb205f1430977dbbc366 | [
"BSD-2-Clause"
] | null | null | null | MicroPythonPkg/MicroPythonDxe/Lib/Uefi/smbios.py | curtisrlee/edk2-staging | 26f310d39e8dcb4fdfb6cb205f1430977dbbc366 | [
"BSD-2-Clause"
] | null | null | null | MicroPythonPkg/MicroPythonDxe/Lib/Uefi/smbios.py | curtisrlee/edk2-staging | 26f310d39e8dcb4fdfb6cb205f1430977dbbc366 | [
"BSD-2-Clause"
] | null | null | null | ## @file
# Definitions of SMBIOS standard.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
from _uefi import *
from ucollections import OrderedDict
from protocols import *
import uefi
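# NOTE: The field descriptor strings below follow the marshalling convention of
# this package's _uefi module. As far as can be inferred from their usage here:
# 'B'/'H'/'I'/'Q' are 8/16/32/64-bit unsigned integers, 'X:n' is an n-bit
# bitfield, 'G' is an EFI_GUID, 'O#NAME' embeds the structure named NAME, a
# leading count (e.g. '15B', '1H') is a fixed-size array, 'P' is a pointer, and
# 'FE(...)' is an EFIAPI function pointer.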
SMBIOS_STRUCTURE = OrderedDict((
("Type", 'B'),
("Length", 'B'),
("Handle", 'H')
))
EFI_SMBIOS_PROTOCOL = OrderedDict((
("Add", 'FE(PO#EFI_SMBIOS_PROTOCOL,T,PH,PO#SMBIOS_STRUCTURE)'),
("UpdateString", 'FE(PO#EFI_SMBIOS_PROTOCOL,PH,PN,S)'),
("Remove", 'FE(PO#EFI_SMBIOS_PROTOCOL,H)'),
("GetNext", 'FE(PO#EFI_SMBIOS_PROTOCOL,PH,PB,PPO#SMBIOS_STRUCTURE,PT)'),
("MajorVersion", 'B'),
("MinorVersion", 'B')
))
SMBIOS_TYPES = {
"BIOS_INFORMATION" : 0,
"Type0" : 0,
"SYSTEM_INFORMATION" : 1,
"Type1" : 1,
"BASEBOARD_INFORMATION" : 2,
"Type2" : 2,
"SYSTEM_ENCLOSURE" : 3,
"Type3" : 3,
"PROCESSOR_INFORMATION" : 4,
"Type4" : 4,
"MEMORY_CONTROLLER_INFORMATION" : 5,
"Type5" : 5,
"MEMORY_MODULE_INFORMATON" : 6,
"Type6" : 6,
"CACHE_INFORMATION" : 7,
"Type7" : 7,
"PORT_CONNECTOR_INFORMATION" : 8,
"Type8" : 8,
"SYSTEM_SLOTS" : 9,
"Type9" : 9,
"ONBOARD_DEVICE_INFORMATION" : 10,
"Type10" : 10,
"OEM_STRINGS" : 11,
"Type11" : 11,
"SYSTEM_CONFIGURATION_OPTIONS" : 12,
"Type12" : 12,
"BIOS_LANGUAGE_INFORMATION" : 13,
"Type13" : 13,
"GROUP_ASSOCIATIONS" : 14,
"Type14" : 14,
"SYSTEM_EVENT_LOG" : 15,
"Type15" : 15,
"PHYSICAL_MEMORY_ARRAY" : 16,
"Type16" : 16,
"MEMORY_DEVICE" : 17,
"Type17" : 17,
"32BIT_MEMORY_ERROR_INFORMATION" : 18,
"Type18" : 18,
"MEMORY_ARRAY_MAPPED_ADDRESS" : 19,
"Type19" : 19,
"MEMORY_DEVICE_MAPPED_ADDRESS" : 20,
"Type20" : 20,
"BUILT_IN_POINTING_DEVICE" : 21,
"Type21" : 21,
"PORTABLE_BATTERY" : 22,
"Type22" : 22,
"SYSTEM_RESET" : 23,
"Type23" : 23,
"HARDWARE_SECURITY" : 24,
"Type24" : 24,
"SYSTEM_POWER_CONTROLS" : 25,
"Type25" : 25,
"VOLTAGE_PROBE" : 26,
"Type26" : 26,
"COOLING_DEVICE" : 27,
"Type27" : 27,
"TEMPERATURE_PROBE" : 28,
"Type28" : 28,
"ELECTRICAL_CURRENT_PROBE" : 29,
"Type29" : 29,
"OUT_OF_BAND_REMOTE_ACCESS" : 30,
"Type30" : 30,
"BOOT_INTEGRITY_SERVICE" : 31,
"Type31" : 31,
"SYSTEM_BOOT_INFORMATION" : 32,
"Type32" : 32,
"64BIT_MEMORY_ERROR_INFORMATION" : 33,
"Type33" : 33,
"MANAGEMENT_DEVICE" : 34,
"Type34" : 34,
"MANAGEMENT_DEVICE_COMPONENT" : 35,
"Type35" : 35,
"MANAGEMENT_DEVICE_THRESHOLD_DATA" : 36,
"Type36" : 36,
"MEMORY_CHANNEL" : 37,
"Type37" : 37,
"IPMI_DEVICE_INFORMATION" : 38,
"Type38" : 38,
"SYSTEM_POWER_SUPPLY" : 39,
"Type39" : 39,
"ADDITIONAL_INFORMATION" : 40,
"Type40" : 40,
"ONBOARD_DEVICES_EXTENDED_INFORMATION" : 41,
"Type41" : 41,
"MANAGEMENT_CONTROLLER_HOST_INTERFACE" : 42,
"Type42" : 42,
"TPM_DEVICE" : 43,
"Type43" : 43,
}
SMBIOS_HANDLE = "H"
SMBIOS_TABLE_STRING = "B"
MISC_BIOS_CHARACTERISTICS = OrderedDict((
("Reserved", "Q:2"),
("Unknown", "Q:1"),
("BiosCharacteristicsNotSupported", "Q:1"),
("IsaIsSupported", "Q:1"),
("McaIsSupported", "Q:1"),
("EisaIsSupported", "Q:1"),
("PciIsSupported", "Q:1"),
("PcmciaIsSupported", "Q:1"),
("PlugAndPlayIsSupported", "Q:1"),
("ApmIsSupported", "Q:1"),
("BiosIsUpgradable", "Q:1"),
("BiosShadowingAllowed", "Q:1"),
("VlVesaIsSupported", "Q:1"),
("EscdSupportIsAvailable", "Q:1"),
("BootFromCdIsSupported", "Q:1"),
("SelectableBootIsSupported", "Q:1"),
("RomBiosIsSocketed", "Q:1"),
("BootFromPcmciaIsSupported", "Q:1"),
("EDDSpecificationIsSupported", "Q:1"),
("JapaneseNecFloppyIsSupported", "Q:1"),
("JapaneseToshibaFloppyIsSupported", "Q:1"),
("Floppy525_360IsSupported", "Q:1"),
("Floppy525_12IsSupported", "Q:1"),
("Floppy35_720IsSupported", "Q:1"),
("Floppy35_288IsSupported", "Q:1"),
("PrintScreenIsSupported", "Q:1"),
("Keyboard8042IsSupported", "Q:1"),
("SerialIsSupported", "Q:1"),
("PrinterIsSupported", "Q:1"),
("CgaMonoIsSupported", "Q:1"),
("NecPc98", "Q:1"),
("ReservedForVendor", "Q:32"),
))
EXTENDED_BIOS_ROM_SIZE = OrderedDict((
("Size", "H:14"),
("Unit", "H:2")
))
BASE_BOARD_FEATURE_FLAGS = OrderedDict((
("Motherboard", "B:1"),
("RequiresDaughterCard", "B:1"),
("Removable", "B:1"),
("Replaceable", "B:1"),
("HotSwappable", "B:1"),
("Reserved", "B:3"),
))
CONTAINED_ELEMENT = OrderedDict((
("ContainedElementType", "B"),
("ContainedElementMinimum", "B"),
("ContainedElementMaximum", "B"),
))
PROCESSOR_VOLTAGE = OrderedDict((
("ProcessorVoltageCapability5V", "B:1"),
("ProcessorVoltageCapability3_3V", "B:1"),
("ProcessorVoltageCapability2_9V", "B:1"),
("ProcessorVoltageCapabilityReserved", "B:1"),
("ProcessorVoltageReserved", "B:3"),
("ProcessorVoltageIndicateLegacy", "B:1"),
))
PROCESSOR_SIGNATURE = OrderedDict((
("ProcessorSteppingId", "I:4"),
("ProcessorModel", "I:4"),
("ProcessorFamily", "I:4"),
("ProcessorType", "I:2"),
("ProcessorReserved1", "I:2"),
("ProcessorXModel", "I:4"),
("ProcessorXFamily", "I:8"),
("ProcessorReserved2", "I:4"),
))
PROCESSOR_FEATURE_FLAGS = OrderedDict((
("ProcessorFpu", "I:1"),
("ProcessorVme", "I:1"),
("ProcessorDe", "I:1"),
("ProcessorPse", "I:1"),
("ProcessorTsc", "I:1"),
("ProcessorMsr", "I:1"),
("ProcessorPae", "I:1"),
("ProcessorMce", "I:1"),
("ProcessorCx8", "I:1"),
("ProcessorApic", "I:1"),
("ProcessorReserved1", "I:1"),
("ProcessorSep", "I:1"),
("ProcessorMtrr", "I:1"),
("ProcessorPge", "I:1"),
("ProcessorMca", "I:1"),
("ProcessorCmov", "I:1"),
("ProcessorPat", "I:1"),
("ProcessorPse36", "I:1"),
("ProcessorPsn", "I:1"),
("ProcessorClfsh", "I:1"),
("ProcessorReserved2", "I:1"),
("ProcessorDs", "I:1"),
("ProcessorAcpi", "I:1"),
("ProcessorMmx", "I:1"),
("ProcessorFxsr", "I:1"),
("ProcessorSse", "I:1"),
("ProcessorSse2", "I:1"),
("ProcessorSs", "I:1"),
("ProcessorReserved3", "I:1"),
("ProcessorTm", "I:1"),
("ProcessorReserved4", "I:2"),
))
PROCESSOR_ID_DATA = OrderedDict((
("Signature", "O#PROCESSOR_SIGNATURE"),
("FeatureFlags", "O#PROCESSOR_FEATURE_FLAGS"),
))
MEMORY_ERROR_CORRECT_CAPABILITY = OrderedDict((
("Other", "B:1"),
("Unknown", "B:1"),
("None", "B:1"),
("SingleBitErrorCorrect", "B:1"),
("DoubleBitErrorCorrect", "B:1"),
("ErrorScrubbing", "B:1"),
("Reserved", "B:2"),
))
MEMORY_SPEED_TYPE = OrderedDict((
("Other", "H:1"),
("Unknown", "H:1"),
("SeventyNs", "H:1"),
("SixtyNs", "H:1"),
("FiftyNs", "H:1"),
("Reserved", "H:11"),
))
MEMORY_CURRENT_TYPE = OrderedDict((
("Other", "H:1"),
("Unknown", "H:1"),
("Standard", "H:1"),
("FastPageMode","H:1"),
("Edo", "H:1"),
("Parity", "H:1"),
("Ecc", "H:1"),
("Simm", "H:1"),
("Dimm", "H:1"),
("BurstEdo", "H:1"),
("Sdram", "H:1"),
("Reserved", "H:5"),
))
MEMORY_INSTALLED_ENABLED_SIZE = OrderedDict((
("InstalledOrEnabledSize", "B:7"),
("SingleOrDoubleBank", "B:1"),
))
CACHE_SRAM_TYPE_DATA = OrderedDict((
("Other", "H:1"),
("Unknown", "H:1"),
("NonBurst", "H:1"),
("Burst", "H:1"),
("PipelineBurst", "H:1"),
("Synchronous", "H:1"),
("Asynchronous", "H:1"),
("Reserved", "H:9"),
))
MISC_SLOT_CHARACTERISTICS1 = OrderedDict((
("CharacteristicsUnknown", "B:1"),
("Provides50Volts", "B:1"),
("Provides33Volts", "B:1"),
("SharedSlot", "B:1"),
("PcCard16Supported", "B:1"),
("CardBusSupported", "B:1"),
("ZoomVideoSupported", "B:1"),
("ModemRingResumeSupported","B:1"),
))
MISC_SLOT_CHARACTERISTICS2 = OrderedDict((
("PmeSignalSupported", "B:1"),
("HotPlugDevicesSupported", "B:1"),
("SmbusSignalSupported", "B:1"),
("Reserved", "B:5"),
))
DEVICE_STRUCT = OrderedDict((
("DeviceType", "B"),
("DescriptionString", "O#SMBIOS_TABLE_STRING"),
))
GROUP_STRUCT = OrderedDict((
("ItemType", "B"),
("ItemHandle", "H"),
))
EVENT_LOG_TYPE = OrderedDict((
("LogType", "B"),
("DataFormatType", "B"),
))
MEMORY_DEVICE_TYPE_DETAIL = OrderedDict((
("Reserved", "H:1"),
("Other", "H:1"),
("Unknown", "H:1"),
("FastPaged", "H:1"),
("StaticColumn","H:1"),
("PseudoStatic","H:1"),
("Rambus", "H:1"),
("Synchronous", "H:1"),
("Cmos", "H:1"),
("Edo", "H:1"),
("WindowDram", "H:1"),
("CacheDram", "H:1"),
("Nonvolatile", "H:1"),
("Registered", "H:1"),
("Unbuffered", "H:1"),
("LrDimm", "H:1"),
))
MISC_VOLTAGE_PROBE_LOCATION = OrderedDict((
("VoltageProbeSite", "B:5"),
("VoltageProbeStatus", "B:3"),
))
MISC_COOLING_DEVICE_TYPE = OrderedDict((
("CoolingDevice", "B:5"),
("CoolingDeviceStatus", "B:3"),
))
MISC_TEMPERATURE_PROBE_LOCATION = OrderedDict((
("TemperatureProbeSite", "B:5"),
("TemperatureProbeStatus", "B:3"),
))
MISC_ELECTRICAL_CURRENT_PROBE_LOCATION = OrderedDict((
("ElectricalCurrentProbeSite", "B:5"),
("ElectricalCurrentProbeStatus", "B:3"),
))
MEMORY_DEVICE = OrderedDict((
("DeviceLoad", "B"),
("DeviceHandle", "H"),
))
SYS_POWER_SUPPLY_CHARACTERISTICS = OrderedDict((
("PowerSupplyHotReplaceable", "H:1"),
("PowerSupplyPresent", "H:1"),
("PowerSupplyUnplugged", "H:1"),
("InputVoltageRangeSwitch", "H:4"),
("PowerSupplyStatus", "H:3"),
("PowerSupplyType", "H:4"),
("Reserved", "H:2"),
))
ADDITIONAL_INFORMATION_ENTRY = OrderedDict((
("EntryLength", "B"),
("ReferencedHandle", "H"),
("ReferencedOffset", "B"),
("EntryString", "O#SMBIOS_TABLE_STRING"),
("Value", "1B"),
))
SMBIOS_TABLE_TYPE0 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Vendor", "O#SMBIOS_TABLE_STRING"),
("BiosVersion", "O#SMBIOS_TABLE_STRING"),
("BiosSegment", "H"),
("BiosReleaseDate", "O#SMBIOS_TABLE_STRING"),
("BiosSize", "B"),
("BiosCharacteristics", "O#MISC_BIOS_CHARACTERISTICS"),
("BIOSCharacteristicsExtensionBytes", "2B"),
("SystemBiosMajorRelease", "B"),
("SystemBiosMinorRelease", "B"),
("EmbeddedControllerFirmwareMajorRelease", "B"),
("EmbeddedControllerFirmwareMinorRelease", "B"),
("ExtendedBiosSize", "O#EXTENDED_BIOS_ROM_SIZE"),
))
SMBIOS_TABLE_TYPE1 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("ProductName", "O#SMBIOS_TABLE_STRING"),
("Version", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("Uuid", "G"),
("WakeUpType", "B"),
("SKUNumber", "O#SMBIOS_TABLE_STRING"),
("Family", "O#SMBIOS_TABLE_STRING"),
))
SMBIOS_TABLE_TYPE2 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("ProductName", "O#SMBIOS_TABLE_STRING"),
("Version", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("FeatureFlag", "O#BASE_BOARD_FEATURE_FLAGS"),
("LocationInChassis", "O#SMBIOS_TABLE_STRING"),
("ChassisHandle", "H"),
("BoardType", "B"),
("NumberOfContainedObjectHandles", "B"),
("ContainedObjectHandles", "1H"),
))
SMBIOS_TABLE_TYPE3 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("Type", "B"),
("Version", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("BootupState", "B"),
("PowerSupplyState", "B"),
("ThermalState", "B"),
("SecurityStatus", "B"),
("OemDefined", "4B"),
("Height", "B"),
("NumberofPowerCords", "B"),
("ContainedElementCount", "B"),
("ContainedElementRecordLength","B"),
("ContainedElements", "1O#CONTAINED_ELEMENT"),
))
SMBIOS_TABLE_TYPE4 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Socket", "O#SMBIOS_TABLE_STRING"),
("ProcessorType", "B"),
("ProcessorFamily", "B"),
("ProcessorManufacture", "O#SMBIOS_TABLE_STRING"),
("ProcessorId", "O#PROCESSOR_ID_DATA"),
("ProcessorVersion", "O#SMBIOS_TABLE_STRING"),
("Voltage", "O#PROCESSOR_VOLTAGE"),
("ExternalClock", "H"),
("MaxSpeed", "H"),
("CurrentSpeed", "H"),
("Status", "B"),
("ProcessorUpgrade", "B"),
("L1CacheHandle", "H"),
("L2CacheHandle", "H"),
("L3CacheHandle", "H"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("PartNumber", "B"),
("CoreCount", "B"),
("EnabledCoreCount", "B"),
("ThreadCount", "B"),
("ProcessorCharacteristics", "H"),
("ProcessorFamily2", "H"),
("CoreCount2", "H"),
("EnabledCoreCount2", "H"),
("ThreadCount2", "H"),
))
SMBIOS_TABLE_TYPE5 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ErrDetectMethod", "B"),
("ErrCorrectCapability", "O#MEMORY_ERROR_CORRECT_CAPABILITY"),
("SupportInterleave", "B"),
("CurrentInterleave", "B"),
("MaxMemoryModuleSize", "B"),
("SupportSpeed", "O#MEMORY_SPEED_TYPE"),
("SupportMemoryType", "H"),
("MemoryModuleVoltage", "B"),
("AssociatedMemorySlotNum", "B"),
("MemoryModuleConfigHandles", "1H"),
))
SMBIOS_TABLE_TYPE6 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("SocketDesignation", "O#SMBIOS_TABLE_STRING"),
("BankConnections", "B"),
("CurrentSpeed", "B"),
("CurrentMemoryType", "O#MEMORY_CURRENT_TYPE"),
("InstalledSize", "O#MEMORY_INSTALLED_ENABLED_SIZE"),
("EnabledSize", "O#MEMORY_INSTALLED_ENABLED_SIZE"),
("ErrorStatus", "B"),
))
SMBIOS_TABLE_TYPE7 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("SocketDesignation", "O#SMBIOS_TABLE_STRING"),
("CacheConfiguration", "H"),
("MaximumCacheSize", "H"),
("InstalledSize", "H"),
("SupportedSRAMType", "O#CACHE_SRAM_TYPE_DATA"),
("CurrentSRAMType", "O#CACHE_SRAM_TYPE_DATA"),
("CacheSpeed", "B"),
("ErrorCorrectionType", "B"),
("SystemCacheType", "B"),
("Associativity", "B"),
("MaximumCacheSize2", "I"),
("InstalledSize2", "I"),
))
SMBIOS_TABLE_TYPE8 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InternalReferenceDesignator", "O#SMBIOS_TABLE_STRING"),
("InternalConnectorType", "B"),
("ExternalReferenceDesignator", "O#SMBIOS_TABLE_STRING"),
("ExternalConnectorType", "B"),
("PortType", "B"),
))
SMBIOS_TABLE_TYPE9 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("SlotDesignation", "O#SMBIOS_TABLE_STRING"),
("SlotType", "B"),
("SlotDataBusWidth", "B"),
("CurrentUsage", "B"),
("SlotLength", "B"),
("SlotID", "H"),
("SlotCharacteristics1", "O#MISC_SLOT_CHARACTERISTICS1"),
("SlotCharacteristics2", "O#MISC_SLOT_CHARACTERISTICS2"),
("SegmentGroupNum", "H"),
("BusNum", "B"),
("DevFuncNum", "B"),
))
SMBIOS_TABLE_TYPE10 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Device", "1O#DEVICE_STRUCT"),
))
SMBIOS_TABLE_TYPE11 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StringCount", "B"),
))
SMBIOS_TABLE_TYPE12 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StringCount", "B"),
))
SMBIOS_TABLE_TYPE13 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InstallableLanguages", "B"),
("Flags", "B"),
("Reserved", "15B"),
("CurrentLanguages", "O#SMBIOS_TABLE_STRING"),
))
SMBIOS_TABLE_TYPE14 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("GroupName", "O#SMBIOS_TABLE_STRING"),
("Group", "1O#GROUP_STRUCT"),
))
SMBIOS_TABLE_TYPE15 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("LogAreaLength", "H"),
("LogHeaderStartOffset", "H"),
("LogDataStartOffset", "H"),
("AccessMethod", "B"),
("LogStatus", "B"),
("LogChangeToken", "I"),
("AccessMethodAddress", "I"),
("LogHeaderFormat", "B"),
("NumberOfSupportedLogTypeDescriptors", "B"),
("LengthOfLogTypeDescriptor", "B"),
("EventLogTypeDescriptors", "1O#EVENT_LOG_TYPE"),
))
SMBIOS_TABLE_TYPE16 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Location", "B"),
("Use", "B"),
("MemoryErrorCorrection", "B"),
("MaximumCapacity", "I"),
("MemoryErrorInformationHandle", "H"),
("NumberOfMemoryDevices", "H"),
("ExtendedMaximumCapacity", "Q"),
))
SMBIOS_TABLE_TYPE17 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("MemoryArrayHandle", "H"),
("MemoryErrorInformationHandle", "H"),
("TotalWidth", "H"),
("DataWidth", "H"),
("Size", "H"),
("FormFactor", "B"),
("DeviceSet", "B"),
("DeviceLocator", "O#SMBIOS_TABLE_STRING"),
("BankLocator", "O#SMBIOS_TABLE_STRING"),
("MemoryType", "B"),
("TypeDetail", "O#MEMORY_DEVICE_TYPE_DETAIL"),
("Speed", "H"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("PartNumber", "O#SMBIOS_TABLE_STRING"),
("Attributes", "B"),
("ExtendedSize", "I"),
("ConfiguredMemoryClockSpeed", "H"),
("MinimumVoltage", "H"),
("MaximumVoltage", "H"),
("ConfiguredVoltage", "H"),
))
SMBIOS_TABLE_TYPE18 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ErrorType", "B"),
("ErrorGranularity", "B"),
("ErrorOperation", "B"),
("VendorSyndrome", "I"),
("MemoryArrayErrorAddress", "I"),
("DeviceErrorAddress", "I"),
("ErrorResolution", "I"),
))
SMBIOS_TABLE_TYPE19 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StartingAddress", "I"),
("EndingAddress", "I"),
("MemoryArrayHandle", "H"),
("PartitionWidth", "B"),
("ExtendedStartingAddress", "Q"),
("ExtendedEndingAddress", "Q"),
))
SMBIOS_TABLE_TYPE20 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StartingAddress", "I"),
("EndingAddress", "I"),
("MemoryDeviceHandle", "H"),
("MemoryArrayMappedAddressHandle", "H"),
("PartitionRowPosition", "B"),
("InterleavePosition", "B"),
("InterleavedDataDepth", "B"),
("ExtendedStartingAddress", "Q"),
("ExtendedEndingAddress", "Q"),
))
SMBIOS_TABLE_TYPE21 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Type", "B"),
("Interface", "B"),
("NumberOfButtons", "B"),
))
SMBIOS_TABLE_TYPE22 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Location", "O#SMBIOS_TABLE_STRING"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("ManufactureDate", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("DeviceName", "O#SMBIOS_TABLE_STRING"),
("DeviceChemistry", "B"),
("DeviceCapacity", "H"),
("DesignVoltage", "H"),
("SBDSVersionNumber", "O#SMBIOS_TABLE_STRING"),
("MaximumErrorInBatteryData", "B"),
("SBDSSerialNumber", "H"),
("SBDSManufactureDate", "H"),
("SBDSDeviceChemistry", "O#SMBIOS_TABLE_STRING"),
("DesignCapacityMultiplier", "B"),
("OEMSpecific", "I"),
))
SMBIOS_TABLE_TYPE23 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Capabilities", "B"),
("ResetCount", "H"),
("ResetLimit", "H"),
("TimerInterval", "H"),
("Timeout", "H"),
))
SMBIOS_TABLE_TYPE24 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("HardwareSecuritySettings", "B"),
))
SMBIOS_TABLE_TYPE25 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("NextScheduledPowerOnMonth", "B"),
("NextScheduledPowerOnDayOfMonth", "B"),
("NextScheduledPowerOnHour", "B"),
("NextScheduledPowerOnMinute", "B"),
("NextScheduledPowerOnSecond", "B"),
))
SMBIOS_TABLE_TYPE26 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("LocationAndStatus", "O#MISC_VOLTAGE_PROBE_LOCATION"),
("MaximumValue", "H"),
("MinimumValue", "H"),
("Resolution", "H"),
("Tolerance", "H"),
("Accuracy", "H"),
("OEMDefined", "I"),
("NominalValue", "H"),
))
SMBIOS_TABLE_TYPE27 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("TemperatureProbeHandle", "H"),
("DeviceTypeAndStatus", "O#MISC_COOLING_DEVICE_TYPE"),
("CoolingUnitGroup", "B"),
("OEMDefined", "I"),
("NominalSpeed", "H"),
("Description", "O#SMBIOS_TABLE_STRING"),
))
SMBIOS_TABLE_TYPE28 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("LocationAndStatus", "O#MISC_TEMPERATURE_PROBE_LOCATION"),
("MaximumValue", "H"),
("MinimumValue", "H"),
("Resolution", "H"),
("Tolerance", "H"),
("Accuracy", "H"),
("OEMDefined", "I"),
("NominalValue", "H"),
))
SMBIOS_TABLE_TYPE29 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("LocationAndStatus", "O#MISC_ELECTRICAL_CURRENT_PROBE_LOCATION"),
("MaximumValue", "H"),
("MinimumValue", "H"),
("Resolution", "H"),
("Tolerance", "H"),
("Accuracy", "H"),
("OEMDefined", "I"),
("NominalValue", "H"),
))
SMBIOS_TABLE_TYPE30 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ManufacturerName", "O#SMBIOS_TABLE_STRING"),
("Connections", "B"),
))
SMBIOS_TABLE_TYPE31 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Checksum", "B"),
("Reserved1", "B"),
("Reserved2", "H"),
("BisEntry16", "I"),
("BisEntry32", "I"),
("Reserved3", "Q"),
("Reserved4", "I"),
))
SMBIOS_TABLE_TYPE32 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Reserved", "6B"),
("BootStatus", "B"),
))
SMBIOS_TABLE_TYPE33 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ErrorType", "B"),
("ErrorGranularity", "B"),
("ErrorOperation", "B"),
("VendorSyndrome", "I"),
("MemoryArrayErrorAddress", "Q"),
("DeviceErrorAddress", "Q"),
("ErrorResolution", "I"),
))
SMBIOS_TABLE_TYPE34 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("Type", "B"),
("Address", "I"),
("AddressType", "B"),
))
SMBIOS_TABLE_TYPE35 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("ManagementDeviceHandle", "H"),
("ComponentHandle", "H"),
("ThresholdHandle", "H"),
))
SMBIOS_TABLE_TYPE36 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("LowerThresholdNonCritical", "H"),
("UpperThresholdNonCritical", "H"),
("LowerThresholdCritical", "H"),
("UpperThresholdCritical", "H"),
("LowerThresholdNonRecoverable", "H"),
("UpperThresholdNonRecoverable", "H"),
))
SMBIOS_TABLE_TYPE37 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ChannelType", "B"),
("MaximumChannelLoad", "B"),
("MemoryDeviceCount", "B"),
("MemoryDevice", "1O#MEMORY_DEVICE"),
))
SMBIOS_TABLE_TYPE38 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InterfaceType", "B"),
("IPMISpecificationRevision", "B"),
("I2CSlaveAddress", "B"),
("NVStorageDeviceAddress", "B"),
("BaseAddress", "Q"),
("BaseAddressModifier_InterruptInfo", "B"),
("InterruptNumber", "B"),
))
SMBIOS_TABLE_TYPE39 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("PowerUnitGroup", "B"),
("Location", "O#SMBIOS_TABLE_STRING"),
("DeviceName", "O#SMBIOS_TABLE_STRING"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTagNumber", "O#SMBIOS_TABLE_STRING"),
("ModelPartNumber", "O#SMBIOS_TABLE_STRING"),
("RevisionLevel", "O#SMBIOS_TABLE_STRING"),
("MaxPowerCapacity", "H"),
("PowerSupplyCharacteristics", "O#SYS_POWER_SUPPLY_CHARACTERISTICS"),
("InputVoltageProbeHandle", "H"),
("CoolingDeviceHandle", "H"),
("InputCurrentProbeHandle", "H"),
))
SMBIOS_TABLE_TYPE40 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("NumberOfAdditionalInformationEntries", "B"),
("AdditionalInfoEntries", "1O#ADDITIONAL_INFORMATION_ENTRY"),
))
SMBIOS_TABLE_TYPE41 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ReferenceDesignation", "O#SMBIOS_TABLE_STRING"),
("DeviceType", "B"),
("DeviceTypeInstance", "B"),
("SegmentGroupNum", "H"),
("BusNum", "B"),
("DevFuncNum", "B"),
))
SMBIOS_TABLE_TYPE42 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InterfaceType", "B"),
("MCHostInterfaceData", "B"),
))
SMBIOS_TABLE_TYPE43 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("VendorID", "4B"),
("MajorSpecVersion", "B"),
("MinorSpecVersion", "B"),
("FirmwareVersion1", "I"),
("FirmwareVersion2", "I"),
("Description", "O#SMBIOS_TABLE_STRING"),
("Characteristics", "Q"),
("OemDefined", "I"),
))
def GetSmbiosProtocol():
Infc = mem() # empty object just used to hold the protocol pointer
uefi.bs.LocateProtocol (gEfiSmbiosProtocolGuid, null, Infc.REF().REF())
Infc.CAST("O#EFI_SMBIOS_PROTOCOL") # typecast it so we can access its fields
return Infc
# The following code works too, but it needs to allocate memory from the heap.
#
# Infcp = mem("PO#EFI_SMBIOS_PROTOCOL")
# uefi.bs.LocateProtocol (gEfiSmbiosProtocolGuid, null, Infcp.REF())
# Infc = Infcp.DREF()
# Infcp.FREE() # remember to free
# return Infc
#
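# Module-level protocol instance shared by the classes below.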
__PROT__ = GetSmbiosProtocol()
class SmbiosEntry(object):
ENTRIES = {}
STRINGS = {}
TYPES = {}
def __new__(Class, Entry, EntryType, StringList):
for Obj in Class.ENTRIES:
if Class.ENTRIES[Obj].ADDR == Entry.ADDR:
return Obj
        return super(SmbiosEntry, Class).__new__(Class)
def __init__(self, Entry, EntryType, StringList):
if self in SmbiosEntry.ENTRIES:
return
SmbiosEntry.ENTRIES[self] = Entry
SmbiosEntry.STRINGS[self] = StringList
SmbiosEntry.TYPES[self] = EntryType
def __getattr__(self, Name):
Entry = SmbiosEntry.ENTRIES[self]
Strings = SmbiosEntry.STRINGS[self]
Value = getattr(Entry, Name)
Type = self._GetEntryType(Name)
if Type == "SMBIOS_TABLE_STRING" and Value > 0 and Value <= len(Strings):
return Strings[Value - 1]
elif type(Type) == OrderedDict:
            return SmbiosEntry(Value, Type, Strings)
return Value
def __setattr__(self, Name, Value):
Entry = SmbiosEntry.ENTRIES[self]
Strings = SmbiosEntry.STRINGS[self]
if type(Value) == str:
Index = getattr(Entry, Name)
Strings[Index - 1] = Value
else:
setattr(Entry, Name, Value)
def _GetEntryType(self, EntryName):
TypeDef = SmbiosEntry.TYPES[self]
DefStr = TypeDef[EntryName]
try:
TypeStart = DefStr.index("O#")
DefStr = DefStr[TypeStart + 2:]
DefObj = eval(DefStr)
if type(DefObj) == OrderedDict:
return DefObj
except:
pass
return DefStr
def __iter__(self):
Entry = SmbiosEntry.ENTRIES[self]
return iter(Entry)
class SmbiosRecord(object):
#TRICK: MicroPython doesn't support object.__setattr__() to add object
    # attributes if __setattr__ is overridden, so we have to use class
    # attributes to avoid an infinite loop.
RECORDS = {}
STRINGS = {}
TYPES = {}
def __init__(self, RawData):
SmbiosRecord.RECORDS[self] = RawData
self._GetTypeDefinition()
self._ExtractStringList()
def __str__(self):
return "smbios.type%d[%x]" % (self.Hdr.Type, self.Hdr.Handle)
def _GetEntryType(self, TypeDef, EntryName):
DefStr = TypeDef[EntryName]
try:
TypeStart = DefStr.index("O#")
DefStr = DefStr[TypeStart + 2:]
DefObj = eval(DefStr)
if type(DefObj) == OrderedDict:
return DefObj
except:
pass
return DefStr
def _DumpEntries(self, Data, TypeDef, StringList, Indent=0):
if not Data:
return
ConvertArray = False
for Name in Data:
if type(Name) not in [str]:
if not ConvertArray:
ConvertArray = True
print("%s[%02x" % (' ' * Indent, Name), end='')
else:
print(", %02x" % Name, end='')
continue
Field = getattr(Data, Name, None)
if Field == None:
continue
if type(Field) == mem:
print("%s.%s" % (' ' * Indent, Name))
self._DumpEntries(Field, self._GetEntryType (TypeDef, Name), StringList, Indent + 2)
elif type(Field) == guid:
print("%s.%s = %s" % (' ' * Indent, Name, Field))
elif type(Field) == str:
print('%s.%s = "%s"' % (' ' * Indent, Name, Field))
else:
TypeStr = self._GetEntryType (TypeDef, Name)
if TypeStr == "SMBIOS_TABLE_STRING" and Field <= len(StringList):
Field = StringList[Field - 1]
print('%s.%s = "%s"' % (' ' * Indent, Name, Field))
else:
Field = hex(Field)
print("%s.%s = %s" % (' ' * Indent, Name, Field))
else:
if ConvertArray:
print("]")
def _GetTypeDefinition(self):
Data = SmbiosRecord.RECORDS[self]
TypeDef = eval("SMBIOS_TABLE_TYPE%s" % Data.Hdr.Type)
SmbiosRecord.TYPES[self] = TypeDef
def _ExtractStringList(self):
Data = SmbiosRecord.RECORDS[self]
Buf = mem(0, Data.ADDR + Data.Hdr.Length)
Index = 0
Start = Buf.ADDR
Length = 0
StringList = []
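        # Per the SMBIOS spec, the strings follow the formatted area as a
        # sequence of NUL-terminated strings; an extra NUL (double NUL)
        # terminates the whole set, which is what this loop scans for.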
while (Buf[Index] != 0 or Buf[Index + 1] != 0):
Length += 1
if Buf[Index] == 0:
StrObj = mem("%da" % Length, Start)
StringList.append(StrObj.VALUE)
Start += Length
Length = 0
Index += 1
if Length > 0:
StrObj = mem("%da" % (Length + 1), Start)
StringList.append(StrObj.VALUE)
# Update size for the sake of strings
Data.SIZE = Data.Hdr.Length + Index + 2
SmbiosRecord.STRINGS[self] = StringList
def __getattr__(self, Name):
Data = SmbiosRecord.RECORDS[self]
TypeDef = SmbiosRecord.TYPES[self]
Strings = SmbiosRecord.STRINGS[self]
assert (Data != None)
Value = getattr(Data, Name)
Type = self._GetEntryType(TypeDef, Name)
if (Type == "SMBIOS_TABLE_STRING" and Value > 0 and Value <= len(Strings)):
return Strings[Value - 1]
elif type(Type) == OrderedDict:
return SmbiosEntry(Value, Type, Strings)
return Value
def __setattr__(self, Name, Value):
Data = SmbiosRecord.RECORDS[self]
assert (Data != None)
Strings = SmbiosRecord.STRINGS[self]
if type(Value) == str:
Index = getattr(Data, Name)
Strings[Index - 1] = Value
else:
setattr(Data, Name, Value)
def __iter__(self):
Data = SmbiosRecord.RECORDS[self]
return iter(Data)
def Publish(self):
Done = False
Data = SmbiosRecord.RECORDS[self]
Handle = mem("H")
Handle.VALUE = Data.Hdr.Handle
NewStringList = SmbiosRecord.STRINGS[self]
# Get old string list by parsing raw data again
self._ExtractStringList()
OldStringList = SmbiosRecord.STRINGS[self]
StringNumber = mem("N")
for Index in range(len(NewStringList)):
if NewStringList[Index] != OldStringList[Index]:
StringNumber.VALUE = Index + 1
SMBIOS._Protocol.UpdateString(SMBIOS._Protocol, Handle, StringNumber, NewStringList[Index])
Done = True
#
        # Updating a string also republishes the rest of the record data, so
        # no separate Remove/Add pass is needed if a string update happened.
#
if not Done:
#
# Removal will free the memory of record data. We need to keep it in a
# new memory block in advance.
#
NewData = mem("%dB" % Data.SIZE)
NewData.VALUE = Data # This will do copying.
NewData.CAST("O#SMBIOS_TABLE_TYPE%d" % Data.Hdr.Type)
SMBIOS._Protocol.Remove(SMBIOS._Protocol, Handle)
SMBIOS._Protocol.Add(SMBIOS._Protocol, null, Handle, NewData)
# Once the record is updated, original record data memory will be freed
# and cannot be used any longer.
SmbiosRecord.RECORDS[self] = None
Handle.FREE()
StringNumber.FREE()
def Dump(self):
Data = SmbiosRecord.RECORDS[self]
TypeDef = SmbiosRecord.TYPES[self]
StringList = SmbiosRecord.STRINGS[self]
self._DumpEntries (Data, TypeDef, StringList)
class SmbiosHelperClass(object):
def __init__(self):
self._Protocol = __PROT__
def __str__(self):
return "smbios%d.%d" % (self._Protocol.MajorVersion, self._Protocol.MinorVersion)
def __getattr__(self, Name):
if not Name:
return None
if Name in ["MajorVersion", "MinorVersion"]:
return getattr(self._Protocol, Name)
if Name not in SMBIOS_TYPES:
return None
Type = "PO#SMBIOS_TABLE_TYPE%d" % SMBIOS_TYPES[Name]
SmbiosHandle = mem(SMBIOS_HANDLE)
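        # SMBIOS_HANDLE_RESERVED (0xFFFE) tells GetNext() to start the
        # iteration from the first record.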
SmbiosHandle.VALUE = 0xFFFE
RecordType = mem('B')
RecordType.VALUE = SMBIOS_TYPES[Name]
RecordPtr = mem("P")
Record = None
RecordList = []
while True:
try:
self._Protocol.GetNext (self._Protocol, SmbiosHandle.REF(), RecordType.REF(), RecordPtr.REF(), null)
except:
break
Record = RecordPtr.DREF(Type)
if Record.Hdr.Type == SMBIOS_TYPES[Name]:
RecordList.append(SmbiosRecord(Record))
SmbiosHandle.FREE()
RecordType.FREE()
return RecordList
SMBIOS = SmbiosHelperClass()
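# Usage sketch (illustrative values; run under the UEFI MicroPython shell):
#
#   for r in SMBIOS.Type1:            # all System Information records
#       print(r.Manufacturer)         # string fields resolve to their text
#       r.SerialNumber = "NEW-SN-01"  # stage a replacement for that string
#       r.Publish()                   # write the change back via the protocol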
if __name__ == '__main__':
import sys
s=SMBIOS
print(s)
if len(sys.argv) == 0:
tlist = range(44)
else:
tlist = [int(sys.argv[0], 0)]
for t in tlist:
print("\n======== Type%d ========" % t)
for r in getattr(s, "Type%d" % t, []):
r.Dump()
print()
| 34.199005 | 117 | 0.500242 |
from _uefi import *
from ucollections import OrderedDict
from protocols import *
import uefi
SMBIOS_STRUCTURE = OrderedDict((
("Type", 'B'),
("Length", 'B'),
("Handle", 'H')
))
EFI_SMBIOS_PROTOCOL = OrderedDict((
("Add", 'FE(PO#EFI_SMBIOS_PROTOCOL,T,PH,PO#SMBIOS_STRUCTURE)'),
("UpdateString", 'FE(PO#EFI_SMBIOS_PROTOCOL,PH,PN,S)'),
("Remove", 'FE(PO#EFI_SMBIOS_PROTOCOL,H)'),
("GetNext", 'FE(PO#EFI_SMBIOS_PROTOCOL,PH,PB,PPO#SMBIOS_STRUCTURE,PT)'),
("MajorVersion", 'B'),
("MinorVersion", 'B')
))
SMBIOS_TYPES = {
"BIOS_INFORMATION" : 0,
"Type0" : 0,
"SYSTEM_INFORMATION" : 1,
"Type1" : 1,
"BASEBOARD_INFORMATION" : 2,
"Type2" : 2,
"SYSTEM_ENCLOSURE" : 3,
"Type3" : 3,
"PROCESSOR_INFORMATION" : 4,
"Type4" : 4,
"MEMORY_CONTROLLER_INFORMATION" : 5,
"Type5" : 5,
"MEMORY_MODULE_INFORMATON" : 6,
"Type6" : 6,
"CACHE_INFORMATION" : 7,
"Type7" : 7,
"PORT_CONNECTOR_INFORMATION" : 8,
"Type8" : 8,
"SYSTEM_SLOTS" : 9,
"Type9" : 9,
"ONBOARD_DEVICE_INFORMATION" : 10,
"Type10" : 10,
"OEM_STRINGS" : 11,
"Type11" : 11,
"SYSTEM_CONFIGURATION_OPTIONS" : 12,
"Type12" : 12,
"BIOS_LANGUAGE_INFORMATION" : 13,
"Type13" : 13,
"GROUP_ASSOCIATIONS" : 14,
"Type14" : 14,
"SYSTEM_EVENT_LOG" : 15,
"Type15" : 15,
"PHYSICAL_MEMORY_ARRAY" : 16,
"Type16" : 16,
"MEMORY_DEVICE" : 17,
"Type17" : 17,
"32BIT_MEMORY_ERROR_INFORMATION" : 18,
"Type18" : 18,
"MEMORY_ARRAY_MAPPED_ADDRESS" : 19,
"Type19" : 19,
"MEMORY_DEVICE_MAPPED_ADDRESS" : 20,
"Type20" : 20,
"BUILT_IN_POINTING_DEVICE" : 21,
"Type21" : 21,
"PORTABLE_BATTERY" : 22,
"Type22" : 22,
"SYSTEM_RESET" : 23,
"Type23" : 23,
"HARDWARE_SECURITY" : 24,
"Type24" : 24,
"SYSTEM_POWER_CONTROLS" : 25,
"Type25" : 25,
"VOLTAGE_PROBE" : 26,
"Type26" : 26,
"COOLING_DEVICE" : 27,
"Type27" : 27,
"TEMPERATURE_PROBE" : 28,
"Type28" : 28,
"ELECTRICAL_CURRENT_PROBE" : 29,
"Type29" : 29,
"OUT_OF_BAND_REMOTE_ACCESS" : 30,
"Type30" : 30,
"BOOT_INTEGRITY_SERVICE" : 31,
"Type31" : 31,
"SYSTEM_BOOT_INFORMATION" : 32,
"Type32" : 32,
"64BIT_MEMORY_ERROR_INFORMATION" : 33,
"Type33" : 33,
"MANAGEMENT_DEVICE" : 34,
"Type34" : 34,
"MANAGEMENT_DEVICE_COMPONENT" : 35,
"Type35" : 35,
"MANAGEMENT_DEVICE_THRESHOLD_DATA" : 36,
"Type36" : 36,
"MEMORY_CHANNEL" : 37,
"Type37" : 37,
"IPMI_DEVICE_INFORMATION" : 38,
"Type38" : 38,
"SYSTEM_POWER_SUPPLY" : 39,
"Type39" : 39,
"ADDITIONAL_INFORMATION" : 40,
"Type40" : 40,
"ONBOARD_DEVICES_EXTENDED_INFORMATION" : 41,
"Type41" : 41,
"MANAGEMENT_CONTROLLER_HOST_INTERFACE" : 42,
"Type42" : 42,
"TPM_DEVICE" : 43,
"Type43" : 43,
}
SMBIOS_HANDLE = "H"
SMBIOS_TABLE_STRING = "B"
MISC_BIOS_CHARACTERISTICS = OrderedDict((
("Reserved", "Q:2"),
("Unknown", "Q:1"),
("BiosCharacteristicsNotSupported", "Q:1"),
("IsaIsSupported", "Q:1"),
("McaIsSupported", "Q:1"),
("EisaIsSupported", "Q:1"),
("PciIsSupported", "Q:1"),
("PcmciaIsSupported", "Q:1"),
("PlugAndPlayIsSupported", "Q:1"),
("ApmIsSupported", "Q:1"),
("BiosIsUpgradable", "Q:1"),
("BiosShadowingAllowed", "Q:1"),
("VlVesaIsSupported", "Q:1"),
("EscdSupportIsAvailable", "Q:1"),
("BootFromCdIsSupported", "Q:1"),
("SelectableBootIsSupported", "Q:1"),
("RomBiosIsSocketed", "Q:1"),
("BootFromPcmciaIsSupported", "Q:1"),
("EDDSpecificationIsSupported", "Q:1"),
("JapaneseNecFloppyIsSupported", "Q:1"),
("JapaneseToshibaFloppyIsSupported", "Q:1"),
("Floppy525_360IsSupported", "Q:1"),
("Floppy525_12IsSupported", "Q:1"),
("Floppy35_720IsSupported", "Q:1"),
("Floppy35_288IsSupported", "Q:1"),
("PrintScreenIsSupported", "Q:1"),
("Keyboard8042IsSupported", "Q:1"),
("SerialIsSupported", "Q:1"),
("PrinterIsSupported", "Q:1"),
("CgaMonoIsSupported", "Q:1"),
("NecPc98", "Q:1"),
("ReservedForVendor", "Q:32"),
))
EXTENDED_BIOS_ROM_SIZE = OrderedDict((
("Size", "H:14"),
("Unit", "H:2")
))
BASE_BOARD_FEATURE_FLAGS = OrderedDict((
("Motherboard", "B:1"),
("RequiresDaughterCard", "B:1"),
("Removable", "B:1"),
("Replaceable", "B:1"),
("HotSwappable", "B:1"),
("Reserved", "B:3"),
))
CONTAINED_ELEMENT = OrderedDict((
("ContainedElementType", "B"),
("ContainedElementMinimum", "B"),
("ContainedElementMaximum", "B"),
))
PROCESSOR_VOLTAGE = OrderedDict((
("ProcessorVoltageCapability5V", "B:1"),
("ProcessorVoltageCapability3_3V", "B:1"),
("ProcessorVoltageCapability2_9V", "B:1"),
("ProcessorVoltageCapabilityReserved", "B:1"),
("ProcessorVoltageReserved", "B:3"),
("ProcessorVoltageIndicateLegacy", "B:1"),
))
PROCESSOR_SIGNATURE = OrderedDict((
("ProcessorSteppingId", "I:4"),
("ProcessorModel", "I:4"),
("ProcessorFamily", "I:4"),
("ProcessorType", "I:2"),
("ProcessorReserved1", "I:2"),
("ProcessorXModel", "I:4"),
("ProcessorXFamily", "I:8"),
("ProcessorReserved2", "I:4"),
))
PROCESSOR_FEATURE_FLAGS = OrderedDict((
("ProcessorFpu", "I:1"),
("ProcessorVme", "I:1"),
("ProcessorDe", "I:1"),
("ProcessorPse", "I:1"),
("ProcessorTsc", "I:1"),
("ProcessorMsr", "I:1"),
("ProcessorPae", "I:1"),
("ProcessorMce", "I:1"),
("ProcessorCx8", "I:1"),
("ProcessorApic", "I:1"),
("ProcessorReserved1", "I:1"),
("ProcessorSep", "I:1"),
("ProcessorMtrr", "I:1"),
("ProcessorPge", "I:1"),
("ProcessorMca", "I:1"),
("ProcessorCmov", "I:1"),
("ProcessorPat", "I:1"),
("ProcessorPse36", "I:1"),
("ProcessorPsn", "I:1"),
("ProcessorClfsh", "I:1"),
("ProcessorReserved2", "I:1"),
("ProcessorDs", "I:1"),
("ProcessorAcpi", "I:1"),
("ProcessorMmx", "I:1"),
("ProcessorFxsr", "I:1"),
("ProcessorSse", "I:1"),
("ProcessorSse2", "I:1"),
("ProcessorSs", "I:1"),
("ProcessorReserved3", "I:1"),
("ProcessorTm", "I:1"),
("ProcessorReserved4", "I:2"),
))
PROCESSOR_ID_DATA = OrderedDict((
("Signature", "O#PROCESSOR_SIGNATURE"),
("FeatureFlags", "O#PROCESSOR_FEATURE_FLAGS"),
))
MEMORY_ERROR_CORRECT_CAPABILITY = OrderedDict((
("Other", "B:1"),
("Unknown", "B:1"),
("None", "B:1"),
("SingleBitErrorCorrect", "B:1"),
("DoubleBitErrorCorrect", "B:1"),
("ErrorScrubbing", "B:1"),
("Reserved", "B:2"),
))
MEMORY_SPEED_TYPE = OrderedDict((
("Other", "H:1"),
("Unknown", "H:1"),
("SeventyNs", "H:1"),
("SixtyNs", "H:1"),
("FiftyNs", "H:1"),
("Reserved", "H:11"),
))
MEMORY_CURRENT_TYPE = OrderedDict((
("Other", "H:1"),
("Unknown", "H:1"),
("Standard", "H:1"),
("FastPageMode","H:1"),
("Edo", "H:1"),
("Parity", "H:1"),
("Ecc", "H:1"),
("Simm", "H:1"),
("Dimm", "H:1"),
("BurstEdo", "H:1"),
("Sdram", "H:1"),
("Reserved", "H:5"),
))
MEMORY_INSTALLED_ENABLED_SIZE = OrderedDict((
("InstalledOrEnabledSize", "B:7"),
("SingleOrDoubleBank", "B:1"),
))
CACHE_SRAM_TYPE_DATA = OrderedDict((
("Other", "H:1"),
("Unknown", "H:1"),
("NonBurst", "H:1"),
("Burst", "H:1"),
("PipelineBurst", "H:1"),
("Synchronous", "H:1"),
("Asynchronous", "H:1"),
("Reserved", "H:9"),
))
MISC_SLOT_CHARACTERISTICS1 = OrderedDict((
("CharacteristicsUnknown", "B:1"),
("Provides50Volts", "B:1"),
("Provides33Volts", "B:1"),
("SharedSlot", "B:1"),
("PcCard16Supported", "B:1"),
("CardBusSupported", "B:1"),
("ZoomVideoSupported", "B:1"),
("ModemRingResumeSupported","B:1"),
))
MISC_SLOT_CHARACTERISTICS2 = OrderedDict((
("PmeSignalSupported", "B:1"),
("HotPlugDevicesSupported", "B:1"),
("SmbusSignalSupported", "B:1"),
("Reserved", "B:5"),
))
DEVICE_STRUCT = OrderedDict((
("DeviceType", "B"),
("DescriptionString", "O#SMBIOS_TABLE_STRING"),
))
GROUP_STRUCT = OrderedDict((
("ItemType", "B"),
("ItemHandle", "H"),
))
EVENT_LOG_TYPE = OrderedDict((
("LogType", "B"),
("DataFormatType", "B"),
))
MEMORY_DEVICE_TYPE_DETAIL = OrderedDict((
("Reserved", "H:1"),
("Other", "H:1"),
("Unknown", "H:1"),
("FastPaged", "H:1"),
("StaticColumn","H:1"),
("PseudoStatic","H:1"),
("Rambus", "H:1"),
("Synchronous", "H:1"),
("Cmos", "H:1"),
("Edo", "H:1"),
("WindowDram", "H:1"),
("CacheDram", "H:1"),
("Nonvolatile", "H:1"),
("Registered", "H:1"),
("Unbuffered", "H:1"),
("LrDimm", "H:1"),
))
MISC_VOLTAGE_PROBE_LOCATION = OrderedDict((
("VoltageProbeSite", "B:5"),
("VoltageProbeStatus", "B:3"),
))
MISC_COOLING_DEVICE_TYPE = OrderedDict((
("CoolingDevice", "B:5"),
("CoolingDeviceStatus", "B:3"),
))
MISC_TEMPERATURE_PROBE_LOCATION = OrderedDict((
("TemperatureProbeSite", "B:5"),
("TemperatureProbeStatus", "B:3"),
))
MISC_ELECTRICAL_CURRENT_PROBE_LOCATION = OrderedDict((
("ElectricalCurrentProbeSite", "B:5"),
("ElectricalCurrentProbeStatus", "B:3"),
))
MEMORY_DEVICE = OrderedDict((
("DeviceLoad", "B"),
("DeviceHandle", "H"),
))
SYS_POWER_SUPPLY_CHARACTERISTICS = OrderedDict((
("PowerSupplyHotReplaceable", "H:1"),
("PowerSupplyPresent", "H:1"),
("PowerSupplyUnplugged", "H:1"),
("InputVoltageRangeSwitch", "H:4"),
("PowerSupplyStatus", "H:3"),
("PowerSupplyType", "H:4"),
("Reserved", "H:2"),
))
ADDITIONAL_INFORMATION_ENTRY = OrderedDict((
("EntryLength", "B"),
("ReferencedHandle", "H"),
("ReferencedOffset", "B"),
("EntryString", "O#SMBIOS_TABLE_STRING"),
("Value", "1B"),
))
SMBIOS_TABLE_TYPE0 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Vendor", "O#SMBIOS_TABLE_STRING"),
("BiosVersion", "O#SMBIOS_TABLE_STRING"),
("BiosSegment", "H"),
("BiosReleaseDate", "O#SMBIOS_TABLE_STRING"),
("BiosSize", "B"),
("BiosCharacteristics", "O#MISC_BIOS_CHARACTERISTICS"),
("BIOSCharacteristicsExtensionBytes", "2B"),
("SystemBiosMajorRelease", "B"),
("SystemBiosMinorRelease", "B"),
("EmbeddedControllerFirmwareMajorRelease", "B"),
("EmbeddedControllerFirmwareMinorRelease", "B"),
("ExtendedBiosSize", "O#EXTENDED_BIOS_ROM_SIZE"),
))
SMBIOS_TABLE_TYPE1 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("ProductName", "O#SMBIOS_TABLE_STRING"),
("Version", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("Uuid", "G"),
("WakeUpType", "B"),
("SKUNumber", "O#SMBIOS_TABLE_STRING"),
("Family", "O#SMBIOS_TABLE_STRING"),
))
SMBIOS_TABLE_TYPE2 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("ProductName", "O#SMBIOS_TABLE_STRING"),
("Version", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("FeatureFlag", "O#BASE_BOARD_FEATURE_FLAGS"),
("LocationInChassis", "O#SMBIOS_TABLE_STRING"),
("ChassisHandle", "H"),
("BoardType", "B"),
("NumberOfContainedObjectHandles", "B"),
("ContainedObjectHandles", "1H"),
))
SMBIOS_TABLE_TYPE3 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("Type", "B"),
("Version", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("BootupState", "B"),
("PowerSupplyState", "B"),
("ThermalState", "B"),
("SecurityStatus", "B"),
("OemDefined", "4B"),
("Height", "B"),
("NumberofPowerCords", "B"),
("ContainedElementCount", "B"),
("ContainedElementRecordLength","B"),
("ContainedElements", "1O#CONTAINED_ELEMENT"),
))
SMBIOS_TABLE_TYPE4 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Socket", "O#SMBIOS_TABLE_STRING"),
("ProcessorType", "B"),
("ProcessorFamily", "B"),
("ProcessorManufacture", "O#SMBIOS_TABLE_STRING"),
("ProcessorId", "O#PROCESSOR_ID_DATA"),
("ProcessorVersion", "O#SMBIOS_TABLE_STRING"),
("Voltage", "O#PROCESSOR_VOLTAGE"),
("ExternalClock", "H"),
("MaxSpeed", "H"),
("CurrentSpeed", "H"),
("Status", "B"),
("ProcessorUpgrade", "B"),
("L1CacheHandle", "H"),
("L2CacheHandle", "H"),
("L3CacheHandle", "H"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("PartNumber", "B"),
("CoreCount", "B"),
("EnabledCoreCount", "B"),
("ThreadCount", "B"),
("ProcessorCharacteristics", "H"),
("ProcessorFamily2", "H"),
("CoreCount2", "H"),
("EnabledCoreCount2", "H"),
("ThreadCount2", "H"),
))
SMBIOS_TABLE_TYPE5 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ErrDetectMethod", "B"),
("ErrCorrectCapability", "O#MEMORY_ERROR_CORRECT_CAPABILITY"),
("SupportInterleave", "B"),
("CurrentInterleave", "B"),
("MaxMemoryModuleSize", "B"),
("SupportSpeed", "O#MEMORY_SPEED_TYPE"),
("SupportMemoryType", "H"),
("MemoryModuleVoltage", "B"),
("AssociatedMemorySlotNum", "B"),
("MemoryModuleConfigHandles", "1H"),
))
SMBIOS_TABLE_TYPE6 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("SocketDesignation", "O#SMBIOS_TABLE_STRING"),
("BankConnections", "B"),
("CurrentSpeed", "B"),
("CurrentMemoryType", "O#MEMORY_CURRENT_TYPE"),
("InstalledSize", "O#MEMORY_INSTALLED_ENABLED_SIZE"),
("EnabledSize", "O#MEMORY_INSTALLED_ENABLED_SIZE"),
("ErrorStatus", "B"),
))
SMBIOS_TABLE_TYPE7 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("SocketDesignation", "O#SMBIOS_TABLE_STRING"),
("CacheConfiguration", "H"),
("MaximumCacheSize", "H"),
("InstalledSize", "H"),
("SupportedSRAMType", "O#CACHE_SRAM_TYPE_DATA"),
("CurrentSRAMType", "O#CACHE_SRAM_TYPE_DATA"),
("CacheSpeed", "B"),
("ErrorCorrectionType", "B"),
("SystemCacheType", "B"),
("Associativity", "B"),
("MaximumCacheSize2", "I"),
("InstalledSize2", "I"),
))
SMBIOS_TABLE_TYPE8 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InternalReferenceDesignator", "O#SMBIOS_TABLE_STRING"),
("InternalConnectorType", "B"),
("ExternalReferenceDesignator", "O#SMBIOS_TABLE_STRING"),
("ExternalConnectorType", "B"),
("PortType", "B"),
))
SMBIOS_TABLE_TYPE9 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("SlotDesignation", "O#SMBIOS_TABLE_STRING"),
("SlotType", "B"),
("SlotDataBusWidth", "B"),
("CurrentUsage", "B"),
("SlotLength", "B"),
("SlotID", "H"),
("SlotCharacteristics1", "O#MISC_SLOT_CHARACTERISTICS1"),
("SlotCharacteristics2", "O#MISC_SLOT_CHARACTERISTICS2"),
("SegmentGroupNum", "H"),
("BusNum", "B"),
("DevFuncNum", "B"),
))
SMBIOS_TABLE_TYPE10 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Device", "1O#DEVICE_STRUCT"),
))
SMBIOS_TABLE_TYPE11 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StringCount", "B"),
))
SMBIOS_TABLE_TYPE12 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StringCount", "B"),
))
SMBIOS_TABLE_TYPE13 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InstallableLanguages", "B"),
("Flags", "B"),
("Reserved", "15B"),
("CurrentLanguages", "O#SMBIOS_TABLE_STRING"),
))
SMBIOS_TABLE_TYPE14 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("GroupName", "O#SMBIOS_TABLE_STRING"),
("Group", "1O#GROUP_STRUCT"),
))
SMBIOS_TABLE_TYPE15 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("LogAreaLength", "H"),
("LogHeaderStartOffset", "H"),
("LogDataStartOffset", "H"),
("AccessMethod", "B"),
("LogStatus", "B"),
("LogChangeToken", "I"),
("AccessMethodAddress", "I"),
("LogHeaderFormat", "B"),
("NumberOfSupportedLogTypeDescriptors", "B"),
("LengthOfLogTypeDescriptor", "B"),
("EventLogTypeDescriptors", "1O#EVENT_LOG_TYPE"),
))
SMBIOS_TABLE_TYPE16 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Location", "B"),
("Use", "B"),
("MemoryErrorCorrection", "B"),
("MaximumCapacity", "I"),
("MemoryErrorInformationHandle", "H"),
("NumberOfMemoryDevices", "H"),
("ExtendedMaximumCapacity", "Q"),
))
SMBIOS_TABLE_TYPE17 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("MemoryArrayHandle", "H"),
("MemoryErrorInformationHandle", "H"),
("TotalWidth", "H"),
("DataWidth", "H"),
("Size", "H"),
("FormFactor", "B"),
("DeviceSet", "B"),
("DeviceLocator", "O#SMBIOS_TABLE_STRING"),
("BankLocator", "O#SMBIOS_TABLE_STRING"),
("MemoryType", "B"),
("TypeDetail", "O#MEMORY_DEVICE_TYPE_DETAIL"),
("Speed", "H"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTag", "O#SMBIOS_TABLE_STRING"),
("PartNumber", "O#SMBIOS_TABLE_STRING"),
("Attributes", "B"),
("ExtendedSize", "I"),
("ConfiguredMemoryClockSpeed", "H"),
("MinimumVoltage", "H"),
("MaximumVoltage", "H"),
("ConfiguredVoltage", "H"),
))
SMBIOS_TABLE_TYPE18 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ErrorType", "B"),
("ErrorGranularity", "B"),
("ErrorOperation", "B"),
("VendorSyndrome", "I"),
("MemoryArrayErrorAddress", "I"),
("DeviceErrorAddress", "I"),
("ErrorResolution", "I"),
))
SMBIOS_TABLE_TYPE19 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StartingAddress", "I"),
("EndingAddress", "I"),
("MemoryArrayHandle", "H"),
("PartitionWidth", "B"),
("ExtendedStartingAddress", "Q"),
("ExtendedEndingAddress", "Q"),
))
SMBIOS_TABLE_TYPE20 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("StartingAddress", "I"),
("EndingAddress", "I"),
("MemoryDeviceHandle", "H"),
("MemoryArrayMappedAddressHandle", "H"),
("PartitionRowPosition", "B"),
("InterleavePosition", "B"),
("InterleavedDataDepth", "B"),
("ExtendedStartingAddress", "Q"),
("ExtendedEndingAddress", "Q"),
))
SMBIOS_TABLE_TYPE21 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Type", "B"),
("Interface", "B"),
("NumberOfButtons", "B"),
))
SMBIOS_TABLE_TYPE22 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Location", "O#SMBIOS_TABLE_STRING"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("ManufactureDate", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("DeviceName", "O#SMBIOS_TABLE_STRING"),
("DeviceChemistry", "B"),
("DeviceCapacity", "H"),
("DesignVoltage", "H"),
("SBDSVersionNumber", "O#SMBIOS_TABLE_STRING"),
("MaximumErrorInBatteryData", "B"),
("SBDSSerialNumber", "H"),
("SBDSManufactureDate", "H"),
("SBDSDeviceChemistry", "O#SMBIOS_TABLE_STRING"),
("DesignCapacityMultiplier", "B"),
("OEMSpecific", "I"),
))
SMBIOS_TABLE_TYPE23 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Capabilities", "B"),
("ResetCount", "H"),
("ResetLimit", "H"),
("TimerInterval", "H"),
("Timeout", "H"),
))
SMBIOS_TABLE_TYPE24 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("HardwareSecuritySettings", "B"),
))
SMBIOS_TABLE_TYPE25 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("NextScheduledPowerOnMonth", "B"),
("NextScheduledPowerOnDayOfMonth", "B"),
("NextScheduledPowerOnHour", "B"),
("NextScheduledPowerOnMinute", "B"),
("NextScheduledPowerOnSecond", "B"),
))
SMBIOS_TABLE_TYPE26 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("LocationAndStatus", "O#MISC_VOLTAGE_PROBE_LOCATION"),
("MaximumValue", "H"),
("MinimumValue", "H"),
("Resolution", "H"),
("Tolerance", "H"),
("Accuracy", "H"),
("OEMDefined", "I"),
("NominalValue", "H"),
))
SMBIOS_TABLE_TYPE27 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("TemperatureProbeHandle", "H"),
("DeviceTypeAndStatus", "O#MISC_COOLING_DEVICE_TYPE"),
("CoolingUnitGroup", "B"),
("OEMDefined", "I"),
("NominalSpeed", "H"),
("Description", "O#SMBIOS_TABLE_STRING"),
))
SMBIOS_TABLE_TYPE28 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("LocationAndStatus", "O#MISC_TEMPERATURE_PROBE_LOCATION"),
("MaximumValue", "H"),
("MinimumValue", "H"),
("Resolution", "H"),
("Tolerance", "H"),
("Accuracy", "H"),
("OEMDefined", "I"),
("NominalValue", "H"),
))
SMBIOS_TABLE_TYPE29 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("LocationAndStatus", "O#MISC_ELECTRICAL_CURRENT_PROBE_LOCATION"),
("MaximumValue", "H"),
("MinimumValue", "H"),
("Resolution", "H"),
("Tolerance", "H"),
("Accuracy", "H"),
("OEMDefined", "I"),
("NominalValue", "H"),
))
SMBIOS_TABLE_TYPE30 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ManufacturerName", "O#SMBIOS_TABLE_STRING"),
("Connections", "B"),
))
SMBIOS_TABLE_TYPE31 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Checksum", "B"),
("Reserved1", "B"),
("Reserved2", "H"),
("BisEntry16", "I"),
("BisEntry32", "I"),
("Reserved3", "Q"),
("Reserved4", "I"),
))
SMBIOS_TABLE_TYPE32 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Reserved", "6B"),
("BootStatus", "B"),
))
SMBIOS_TABLE_TYPE33 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ErrorType", "B"),
("ErrorGranularity", "B"),
("ErrorOperation", "B"),
("VendorSyndrome", "I"),
("MemoryArrayErrorAddress", "Q"),
("DeviceErrorAddress", "Q"),
("ErrorResolution", "I"),
))
SMBIOS_TABLE_TYPE34 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("Type", "B"),
("Address", "I"),
("AddressType", "B"),
))
SMBIOS_TABLE_TYPE35 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("Description", "O#SMBIOS_TABLE_STRING"),
("ManagementDeviceHandle", "H"),
("ComponentHandle", "H"),
("ThresholdHandle", "H"),
))
SMBIOS_TABLE_TYPE36 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("LowerThresholdNonCritical", "H"),
("UpperThresholdNonCritical", "H"),
("LowerThresholdCritical", "H"),
("UpperThresholdCritical", "H"),
("LowerThresholdNonRecoverable", "H"),
("UpperThresholdNonRecoverable", "H"),
))
SMBIOS_TABLE_TYPE37 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ChannelType", "B"),
("MaximumChannelLoad", "B"),
("MemoryDeviceCount", "B"),
("MemoryDevice", "1O#MEMORY_DEVICE"),
))
SMBIOS_TABLE_TYPE38 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InterfaceType", "B"),
("IPMISpecificationRevision", "B"),
("I2CSlaveAddress", "B"),
("NVStorageDeviceAddress", "B"),
("BaseAddress", "Q"),
("BaseAddressModifier_InterruptInfo", "B"),
("InterruptNumber", "B"),
))
SMBIOS_TABLE_TYPE39 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("PowerUnitGroup", "B"),
("Location", "O#SMBIOS_TABLE_STRING"),
("DeviceName", "O#SMBIOS_TABLE_STRING"),
("Manufacturer", "O#SMBIOS_TABLE_STRING"),
("SerialNumber", "O#SMBIOS_TABLE_STRING"),
("AssetTagNumber", "O#SMBIOS_TABLE_STRING"),
("ModelPartNumber", "O#SMBIOS_TABLE_STRING"),
("RevisionLevel", "O#SMBIOS_TABLE_STRING"),
("MaxPowerCapacity", "H"),
("PowerSupplyCharacteristics", "O#SYS_POWER_SUPPLY_CHARACTERISTICS"),
("InputVoltageProbeHandle", "H"),
("CoolingDeviceHandle", "H"),
("InputCurrentProbeHandle", "H"),
))
SMBIOS_TABLE_TYPE40 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("NumberOfAdditionalInformationEntries", "B"),
("AdditionalInfoEntries", "1O#ADDITIONAL_INFORMATION_ENTRY"),
))
SMBIOS_TABLE_TYPE41 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("ReferenceDesignation", "O#SMBIOS_TABLE_STRING"),
("DeviceType", "B"),
("DeviceTypeInstance", "B"),
("SegmentGroupNum", "H"),
("BusNum", "B"),
("DevFuncNum", "B"),
))
SMBIOS_TABLE_TYPE42 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("InterfaceType", "B"),
("MCHostInterfaceData", "B"),
))
SMBIOS_TABLE_TYPE43 = OrderedDict((
("Hdr", "O#SMBIOS_STRUCTURE"),
("VendorID", "4B"),
("MajorSpecVersion", "B"),
("MinorSpecVersion", "B"),
("FirmwareVersion1", "I"),
("FirmwareVersion2", "I"),
("Description", "O#SMBIOS_TABLE_STRING"),
("Characteristics", "Q"),
("OemDefined", "I"),
))
def GetSmbiosProtocol():
Infc = mem()
uefi.bs.LocateProtocol (gEfiSmbiosProtocolGuid, null, Infc.REF().REF())
Infc.CAST("O#EFI_SMBIOS_PROTOCOL")
return Infc
__PROT__ = GetSmbiosProtocol()
class SmbiosEntry(object):
ENTRIES = {}
STRINGS = {}
TYPES = {}
def __new__(Class, Entry, EntryType, StringList):
for Obj in Class.ENTRIES:
if Class.ENTRIES[Obj].ADDR == Entry.ADDR:
return Obj
        return super(SmbiosEntry, Class).__new__(Class)
def __init__(self, Entry, EntryType, StringList):
if self in SmbiosEntry.ENTRIES:
return
SmbiosEntry.ENTRIES[self] = Entry
SmbiosEntry.STRINGS[self] = StringList
SmbiosEntry.TYPES[self] = EntryType
def __getattr__(self, Name):
Entry = SmbiosEntry.ENTRIES[self]
Strings = SmbiosEntry.STRINGS[self]
Value = getattr(Entry, Name)
Type = self._GetEntryType(Name)
if Type == "SMBIOS_TABLE_STRING" and Value > 0 and Value <= len(Strings):
return Strings[Value - 1]
elif type(Type) == OrderedDict:
            return SmbiosEntry(Value, Type, Strings)
return Value
def __setattr__(self, Name, Value):
Entry = SmbiosEntry.ENTRIES[self]
Strings = SmbiosEntry.STRINGS[self]
if type(Value) == str:
Index = getattr(Entry, Name)
Strings[Index - 1] = Value
else:
setattr(Entry, Name, Value)
def _GetEntryType(self, EntryName):
TypeDef = SmbiosEntry.TYPES[self]
DefStr = TypeDef[EntryName]
try:
TypeStart = DefStr.index("O#")
DefStr = DefStr[TypeStart + 2:]
DefObj = eval(DefStr)
if type(DefObj) == OrderedDict:
return DefObj
except:
pass
return DefStr
def __iter__(self):
Entry = SmbiosEntry.ENTRIES[self]
return iter(Entry)
class SmbiosRecord(object):
RECORDS = {}
STRINGS = {}
TYPES = {}
def __init__(self, RawData):
SmbiosRecord.RECORDS[self] = RawData
self._GetTypeDefinition()
self._ExtractStringList()
def __str__(self):
return "smbios.type%d[%x]" % (self.Hdr.Type, self.Hdr.Handle)
def _GetEntryType(self, TypeDef, EntryName):
DefStr = TypeDef[EntryName]
try:
TypeStart = DefStr.index("O#")
DefStr = DefStr[TypeStart + 2:]
DefObj = eval(DefStr)
if type(DefObj) == OrderedDict:
return DefObj
except:
pass
return DefStr
def _DumpEntries(self, Data, TypeDef, StringList, Indent=0):
if not Data:
return
ConvertArray = False
for Name in Data:
if type(Name) not in [str]:
if not ConvertArray:
ConvertArray = True
print("%s[%02x" % (' ' * Indent, Name), end='')
else:
print(", %02x" % Name, end='')
continue
Field = getattr(Data, Name, None)
if Field == None:
continue
if type(Field) == mem:
print("%s.%s" % (' ' * Indent, Name))
self._DumpEntries(Field, self._GetEntryType (TypeDef, Name), StringList, Indent + 2)
elif type(Field) == guid:
print("%s.%s = %s" % (' ' * Indent, Name, Field))
elif type(Field) == str:
print('%s.%s = "%s"' % (' ' * Indent, Name, Field))
else:
TypeStr = self._GetEntryType (TypeDef, Name)
if TypeStr == "SMBIOS_TABLE_STRING" and Field <= len(StringList):
Field = StringList[Field - 1]
print('%s.%s = "%s"' % (' ' * Indent, Name, Field))
else:
Field = hex(Field)
print("%s.%s = %s" % (' ' * Indent, Name, Field))
else:
if ConvertArray:
print("]")
def _GetTypeDefinition(self):
Data = SmbiosRecord.RECORDS[self]
TypeDef = eval("SMBIOS_TABLE_TYPE%s" % Data.Hdr.Type)
SmbiosRecord.TYPES[self] = TypeDef
def _ExtractStringList(self):
Data = SmbiosRecord.RECORDS[self]
Buf = mem(0, Data.ADDR + Data.Hdr.Length)
Index = 0
Start = Buf.ADDR
Length = 0
StringList = []
while (Buf[Index] != 0 or Buf[Index + 1] != 0):
Length += 1
if Buf[Index] == 0:
StrObj = mem("%da" % Length, Start)
StringList.append(StrObj.VALUE)
Start += Length
Length = 0
Index += 1
if Length > 0:
StrObj = mem("%da" % (Length + 1), Start)
StringList.append(StrObj.VALUE)
# Update size for the sake of strings
Data.SIZE = Data.Hdr.Length + Index + 2
SmbiosRecord.STRINGS[self] = StringList
def __getattr__(self, Name):
Data = SmbiosRecord.RECORDS[self]
TypeDef = SmbiosRecord.TYPES[self]
Strings = SmbiosRecord.STRINGS[self]
assert (Data != None)
Value = getattr(Data, Name)
Type = self._GetEntryType(TypeDef, Name)
if (Type == "SMBIOS_TABLE_STRING" and Value > 0 and Value <= len(Strings)):
return Strings[Value - 1]
elif type(Type) == OrderedDict:
return SmbiosEntry(Value, Type, Strings)
return Value
def __setattr__(self, Name, Value):
Data = SmbiosRecord.RECORDS[self]
assert (Data != None)
Strings = SmbiosRecord.STRINGS[self]
if type(Value) == str:
Index = getattr(Data, Name)
Strings[Index - 1] = Value
else:
setattr(Data, Name, Value)
def __iter__(self):
Data = SmbiosRecord.RECORDS[self]
return iter(Data)
def Publish(self):
Done = False
Data = SmbiosRecord.RECORDS[self]
Handle = mem("H")
Handle.VALUE = Data.Hdr.Handle
NewStringList = SmbiosRecord.STRINGS[self]
# Get old string list by parsing raw data again
self._ExtractStringList()
OldStringList = SmbiosRecord.STRINGS[self]
StringNumber = mem("N")
for Index in range(len(NewStringList)):
if NewStringList[Index] != OldStringList[Index]:
StringNumber.VALUE = Index + 1
SMBIOS._Protocol.UpdateString(SMBIOS._Protocol, Handle, StringNumber, NewStringList[Index])
Done = True
#
# Updating string will also update other record data. No more update
# needed if it just happened.
#
if not Done:
#
# Removal will free the memory of record data. We need to keep it in a
# new memory block in advance.
#
NewData = mem("%dB" % Data.SIZE)
NewData.VALUE = Data # This will do copying.
NewData.CAST("O#SMBIOS_TABLE_TYPE%d" % Data.Hdr.Type)
SMBIOS._Protocol.Remove(SMBIOS._Protocol, Handle)
SMBIOS._Protocol.Add(SMBIOS._Protocol, null, Handle, NewData)
# Once the record is updated, original record data memory will be freed
# and cannot be used any longer.
SmbiosRecord.RECORDS[self] = None
Handle.FREE()
StringNumber.FREE()
def Dump(self):
Data = SmbiosRecord.RECORDS[self]
TypeDef = SmbiosRecord.TYPES[self]
StringList = SmbiosRecord.STRINGS[self]
self._DumpEntries (Data, TypeDef, StringList)
class SmbiosHelperClass(object):
def __init__(self):
self._Protocol = __PROT__
def __str__(self):
return "smbios%d.%d" % (self._Protocol.MajorVersion, self._Protocol.MinorVersion)
def __getattr__(self, Name):
if not Name:
return None
if Name in ["MajorVersion", "MinorVersion"]:
return getattr(self._Protocol, Name)
if Name not in SMBIOS_TYPES:
return None
Type = "PO#SMBIOS_TABLE_TYPE%d" % SMBIOS_TYPES[Name]
SmbiosHandle = mem(SMBIOS_HANDLE)
SmbiosHandle.VALUE = 0xFFFE
RecordType = mem('B')
RecordType.VALUE = SMBIOS_TYPES[Name]
RecordPtr = mem("P")
Record = None
RecordList = []
while True:
try:
self._Protocol.GetNext (self._Protocol, SmbiosHandle.REF(), RecordType.REF(), RecordPtr.REF(), null)
except:
break
Record = RecordPtr.DREF(Type)
if Record.Hdr.Type == SMBIOS_TYPES[Name]:
RecordList.append(SmbiosRecord(Record))
SmbiosHandle.FREE()
RecordType.FREE()
return RecordList
SMBIOS = SmbiosHelperClass()
if __name__ == '__main__':
import sys
s=SMBIOS
print(s)
if len(sys.argv) == 0:
tlist = range(44)
else:
tlist = [int(sys.argv[0], 0)]
for t in tlist:
print("\n======== Type%d ========" % t)
for r in getattr(s, "Type%d" % t, []):
r.Dump()
print()
| true | true |
f7fd97803538e02e683e7c280d66d692a0f79aa0 | 728 | py | Python | pearl/nacre/handle.py | dynosaur72/pearl | 5fe929f9feabb91d4e183aea8b8908380fad3208 | [
"MIT"
] | 11 | 2017-11-24T02:19:18.000Z | 2021-08-24T21:00:45.000Z | pearl/nacre/handle.py | dynosaur72/pearl | 5fe929f9feabb91d4e183aea8b8908380fad3208 | [
"MIT"
] | 8 | 2017-11-23T20:24:41.000Z | 2020-03-06T20:29:43.000Z | pearl/nacre/handle.py | dynosaur72/pearl | 5fe929f9feabb91d4e183aea8b8908380fad3208 | [
"MIT"
] | 15 | 2017-11-23T20:08:09.000Z | 2020-06-18T21:53:24.000Z | import re
import hangups
def isEventNotification(update):
if update.event_notification:
return True
return False
def isMessageEvent(update):
if isEventNotification(update):
event = update.event_notification.event
if event.event_type == hangups.hangouts_pb2.EVENT_TYPE_REGULAR_CHAT_MESSAGE:
return True
return False
def newConversationFilter(conversationIdList):
return lambda event: hangups.ConversationEvent(event).conversation_id in conversationIdList
def newMessageFilter(regex):
pattern = re.compile(regex)
return lambda event: bool(pattern.match(hangups.ChatMessageEvent(event).text))
def newUserFilter(gaiaIdList):
return lambda event: hangups.ConversationEvent(event).user_id.gaia_id in gaiaIdList
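# Usage sketch (IDs and pattern are hypothetical):
#
#   in_room = newConversationFilter(['UgxSomeConversationId'])
#   is_greeting = newMessageFilter(r'(?i)^hello\b')
#   event = update.event_notification.event
#   if isMessageEvent(update) and in_room(event) and is_greeting(event):
#       ...  # handle the message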
| 28 | 92 | 0.822802 | import re
import hangups
def isEventNotification(update):
if update.event_notification:
return True
return False
def isMessageEvent(update):
if isEventNotification(update):
event = update.event_notification.event
if event.event_type == hangups.hangouts_pb2.EVENT_TYPE_REGULAR_CHAT_MESSAGE:
return True
return False
def newConversationFilter(conversationIdList):
return lambda event: hangups.ConversationEvent(event).conversation_id in conversationIdList
def newMessageFilter(regex):
pattern = re.compile(regex)
return lambda event: bool(pattern.match(hangups.ChatMessageEvent(event).text))
def newUserFilter(gaiaIdList):
return lambda event: hangups.ConversationEvent(event).user_id.gaia_id in gaiaIdList
| true | true |
f7fd97d1bbd762598167b0efb2e5645c5f7b4642 | 1,245 | py | Python | model-optimizer/mo/front/kaldi/extractors/elementwise_component_ext.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 2 | 2020-11-18T14:14:06.000Z | 2020-11-28T04:55:57.000Z | model-optimizer/mo/front/kaldi/extractors/elementwise_component_ext.py | Andruxin52rus/openvino | d824e371fe7dffb90e6d3d58e4e34adecfce4606 | [
"Apache-2.0"
] | 30 | 2020-11-13T11:44:07.000Z | 2022-02-21T13:03:16.000Z | model-optimizer/mo/front/kaldi/extractors/elementwise_component_ext.py | mmakridi/openvino | 769bb7709597c14debdaa356dd60c5a78bdfa97e | [
"Apache-2.0"
] | 3 | 2021-03-09T08:27:29.000Z | 2021-04-07T04:58:54.000Z | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.utils import read_token_value
from mo.ops.eltwise_ninputs_in_1 import EltwiseNin1
class ElementwiseProductComponentFrontExtractor(FrontExtractorOp):
op = 'elementwiseproductcomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
indim = read_token_value(pb, b'<InputDim>')
outdim = read_token_value(pb, b'<OutputDim>')
num_inputs = indim / outdim
attrs = {'num_inputs': int(num_inputs),
'operation': 'mul'}
EltwiseNin1.update_node_stat(node, attrs)
return cls.enabled
| 32.763158 | 73 | 0.727711 | from mo.front.extractor import FrontExtractorOp
from mo.front.kaldi.utils import read_token_value
from mo.ops.eltwise_ninputs_in_1 import EltwiseNin1
class ElementwiseProductComponentFrontExtractor(FrontExtractorOp):
op = 'elementwiseproductcomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
indim = read_token_value(pb, b'<InputDim>')
outdim = read_token_value(pb, b'<OutputDim>')
num_inputs = indim / outdim
attrs = {'num_inputs': int(num_inputs),
'operation': 'mul'}
EltwiseNin1.update_node_stat(node, attrs)
return cls.enabled
| true | true |
f7fd97d7349167e4069b5e2cce8d20b8cdfac5d2 | 4,185 | py | Python | src/collector.py | migvanderlei/dataset-parser | b6febccbdc829737e50640d980b2034d2c54c95a | [
"MIT"
] | null | null | null | src/collector.py | migvanderlei/dataset-parser | b6febccbdc829737e50640d980b2034d2c54c95a | [
"MIT"
] | null | null | null | src/collector.py | migvanderlei/dataset-parser | b6febccbdc829737e50640d980b2034d2c54c95a | [
"MIT"
] | null | null | null | import os
import json
import logging
from pandas import json_normalize
from src.configurable import Configurable
from datetime import datetime
from glob import glob
from concurrent.futures import ThreadPoolExecutor, as_completed
class Collector(Configurable):
def __init__(self, input_path=None, output_file="collected-{}.csv", config_file=None, output_base_dir="./generated"):
Configurable.__init__(self, config_file)
logging.basicConfig(format="%(asctime)s: [%(levelname)s] %(message)s", level=logging.INFO,
datefmt="%H:%M:%S", filename=self.get_log_file('collector'))
if input_path is None:
self.input_path = self.config.get("outputPath")
else:
self.input_path = input_path
self.output_file = output_file.format(self.get_timestamp())
max_threads = self.config.get("maxThreadCount")
self.max_threads = max_threads if max_threads is not None else 50
self.output_file_path = self.get_output_file_path(output_base_dir, self.output_file)
self.csv_headers = []
self.create_csv_headers_from_keys()
def create_csv_headers_from_keys(self, template=None, parent_keys=""):
if template is None:
template = self.config["template"]
for key in template.keys():
if type(template[key]) is dict:
self.create_csv_headers_from_keys(
template[key], parent_keys+key+"_"
)
else:
self.csv_headers.append(parent_keys+key)
def get_output_file_path(self, base_dir, output_file):
if base_dir.startswith(os.sep):
file_name = os.path.join(base_dir, output_file)
else:
dir_name = os.path.dirname(__file__)
base_dir = os.path.join(dir_name, "..", base_dir)
file_name = os.path.join(base_dir, output_file)
file_name = os.path.abspath(file_name)
if not os.path.exists(base_dir):
try:
os.makedirs(base_dir)
except FileExistsError:
pass
return file_name
def get_timestamp(self):
return datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
def get_files_to_collect(self):
if "*" in self.input_path:
files = glob(self.input_path)
else:
files = glob(self.input_path + "/*/*/*.json")
if len(files) > 0:
return files
else:
message = "No files found in {}".format(self.input_path)
logging.error(message)
raise Exception(message)
def collect_file(self, file_path):
with open(file_path, "r") as f:
data = json.load(f)
return data
def save_collected_json(self, data):
json_file_name = self.output_file_path.replace(".csv", ".json")
with open(json_file_name, "w+") as f:
f.write(json.dumps(data))
logging.info("JSON file created at \"{}\".".format((json_file_name)))
def save_json_to_csv(self, data):
dataframe = json_normalize(data)
dataframe.to_csv(self.output_file_path, index=False)
logging.info("CSV file created at \"{}\".".format((self.output_file_path)))
def collect(self):
files_to_collect = self.get_files_to_collect()
logging.info("Found %d files to process." % len(files_to_collect))
        with ThreadPoolExecutor(max_workers=self.max_threads) as executor:
logging.info("Starting collection process with {} parallel threads.".format(self.max_threads))
start_time = datetime.now()
futures = []
collected_lines = []
for file_path in files_to_collect:
futures.append(executor.submit(self.collect_file, file_path))
for future in as_completed(futures):
collected_lines.append(future.result())
self.save_collected_json(collected_lines)
self.save_json_to_csv(collected_lines)
elapsed_time = datetime.now() - start_time
logging.info("Collection process finished in {}.".format((elapsed_time)))
| 38.394495 | 121 | 0.621983 | import os
import json
import logging
from pandas import json_normalize
from src.configurable import Configurable
from datetime import datetime
from glob import glob
from concurrent.futures import ThreadPoolExecutor, as_completed
class Collector(Configurable):
def __init__(self, input_path=None, output_file="collected-{}.csv", config_file=None, output_base_dir="./generated"):
Configurable.__init__(self, config_file)
logging.basicConfig(format="%(asctime)s: [%(levelname)s] %(message)s", level=logging.INFO,
datefmt="%H:%M:%S", filename=self.get_log_file('collector'))
if input_path is None:
self.input_path = self.config.get("outputPath")
else:
self.input_path = input_path
self.output_file = output_file.format(self.get_timestamp())
max_threads = self.config.get("maxThreadCount")
self.max_threads = max_threads if max_threads is not None else 50
self.output_file_path = self.get_output_file_path(output_base_dir, self.output_file)
self.csv_headers = []
self.create_csv_headers_from_keys()
def create_csv_headers_from_keys(self, template=None, parent_keys=""):
if template is None:
template = self.config["template"]
for key in template.keys():
if type(template[key]) is dict:
self.create_csv_headers_from_keys(
template[key], parent_keys+key+"_"
)
else:
self.csv_headers.append(parent_keys+key)
def get_output_file_path(self, base_dir, output_file):
if base_dir.startswith(os.sep):
file_name = os.path.join(base_dir, output_file)
else:
dir_name = os.path.dirname(__file__)
base_dir = os.path.join(dir_name, "..", base_dir)
file_name = os.path.join(base_dir, output_file)
file_name = os.path.abspath(file_name)
if not os.path.exists(base_dir):
try:
os.makedirs(base_dir)
except FileExistsError:
pass
return file_name
def get_timestamp(self):
return datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
def get_files_to_collect(self):
if "*" in self.input_path:
files = glob(self.input_path)
else:
files = glob(self.input_path + "/*/*/*.json")
if len(files) > 0:
return files
else:
message = "No files found in {}".format(self.input_path)
logging.error(message)
raise Exception(message)
def collect_file(self, file_path):
with open(file_path, "r") as f:
data = json.load(f)
return data
def save_collected_json(self, data):
json_file_name = self.output_file_path.replace(".csv", ".json")
with open(json_file_name, "w+") as f:
f.write(json.dumps(data))
logging.info("JSON file created at \"{}\".".format((json_file_name)))
def save_json_to_csv(self, data):
dataframe = json_normalize(data)
dataframe.to_csv(self.output_file_path, index=False)
logging.info("CSV file created at \"{}\".".format((self.output_file_path)))
def collect(self):
files_to_collect = self.get_files_to_collect()
logging.info("Found %d files to process." % len(files_to_collect))
        with ThreadPoolExecutor(max_workers=self.max_threads) as executor:
logging.info("Starting collection process with {} parallel threads.".format(self.max_threads))
start_time = datetime.now()
futures = []
collected_lines = []
for file_path in files_to_collect:
futures.append(executor.submit(self.collect_file, file_path))
for future in as_completed(futures):
collected_lines.append(future.result())
self.save_collected_json(collected_lines)
self.save_json_to_csv(collected_lines)
elapsed_time = datetime.now() - start_time
logging.info("Collection process finished in {}.".format((elapsed_time)))
| true | true |
f7fd9851fc9c05dde5a5ce02508213ae080f0bb0 | 466 | py | Python | ngn/resources/state.py | hodgestar/banjo | a168efff6f1660bba964bf652aae821607d2643c | [
"MIT"
] | null | null | null | ngn/resources/state.py | hodgestar/banjo | a168efff6f1660bba964bf652aae821607d2643c | [
"MIT"
] | null | null | null | ngn/resources/state.py | hodgestar/banjo | a168efff6f1660bba964bf652aae821607d2643c | [
"MIT"
] | null | null | null | """ A resource for holding game state. """
from .base import Resource, ResourceEvent
class StateResource(Resource):
""" A resource for holding game state. """
name = 'state'
def __init__(self):
self.state = {}
self.state['ngn'] = {
'fps': 0.0,
}
def apply_set_event(self, **kw):
self.state.update(kw)
class StateUpdate(ResourceEvent):
""" Base class for state events. """
resource = 'state'
| 20.26087 | 46 | 0.585837 |
| true | true |
f7fd9894e47f4c8c58c553cb352b0ea11e25e59a | 200 | py | Python | day02/test13.py | liuweidongg/dejin | 12240b9d27347d6e041338869591aa7133bf80cd | [
"Apache-2.0"
] | 1 | 2018-09-27T02:01:38.000Z | 2018-09-27T02:01:38.000Z | day02/test13.py | liuweidongg/dejin | 12240b9d27347d6e041338869591aa7133bf80cd | [
"Apache-2.0"
] | null | null | null | day02/test13.py | liuweidongg/dejin | 12240b9d27347d6e041338869591aa7133bf80cd | [
"Apache-2.0"
] | null | null | null | kg1,jq1 = eval(raw_input(">>"))
kg2,jq2 = eval(raw_input(">>"))
dj1 = jq1/kg1
dj2 = jq2/kg2
if dj1 > dj2:
print("Package 2 has the better price")
else:
print("Package 1 has the better price")
| 22.222222 | 43 | 0.64 |
| true | true |
f7fd9936776797c37b889b257034f33f0ecaec9c | 2,833 | py | Python | PhysicsTools/NanoAOD/python/globals_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | PhysicsTools/NanoAOD/python/globals_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | PhysicsTools/NanoAOD/python/globals_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import *
rhoTable = cms.EDProducer("GlobalVariablesTableProducer",
variables = cms.PSet(
fixedGridRhoFastjetAll = ExtVar( cms.InputTag("fixedGridRhoFastjetAll"), "double", doc = "rho from all PF Candidates, used e.g. for JECs" ),
fixedGridRhoFastjetCentralNeutral = ExtVar( cms.InputTag("fixedGridRhoFastjetCentralNeutral"), "double", doc = "rho from neutral PF Candidates with |eta| < 2.5, used e.g. for rho corrections of some lepton isolations" ),
fixedGridRhoFastjetCentralCalo = ExtVar( cms.InputTag("fixedGridRhoFastjetCentralCalo"), "double", doc = "rho from calo towers with |eta| < 2.5, used e.g. egamma PFCluster isolation" ),
fixedGridRhoFastjetCentral = ExtVar( cms.InputTag("fixedGridRhoFastjetCentral"), "double", doc = "rho from all PF Candidates for central region, used e.g. for JECs" ),
fixedGridRhoFastjetCentralChargedPileUp = ExtVar( cms.InputTag("fixedGridRhoFastjetCentralChargedPileUp"), "double", doc = "rho from charged PF Candidates for central region, used e.g. for JECs" ),
)
)
puTable = cms.EDProducer("NPUTablesProducer",
src = cms.InputTag("slimmedAddPileupInfo"),
pvsrc = cms.InputTag("offlineSlimmedPrimaryVertices"),
zbins = cms.vdouble( [0.0,1.7,2.6,3.0,3.5,4.2,5.2,6.0,7.5,9.0,12.0] ),
savePtHatMax = cms.bool(False),
)
genTable = cms.EDProducer("SimpleGenEventFlatTableProducer",
src = cms.InputTag("generator"),
cut = cms.string(""),
name= cms.string("Generator"),
doc = cms.string("Generator information"),
singleton = cms.bool(True),
extension = cms.bool(False),
variables = cms.PSet(
x1 = Var( "?hasPDF?pdf().x.first:-1", float, doc="x1 fraction of proton momentum carried by the first parton",precision=14 ),
x2 = Var( "?hasPDF?pdf().x.second:-1", float, doc="x2 fraction of proton momentum carried by the second parton",precision=14 ),
xpdf1 = Var( "?hasPDF?pdf().xPDF.first:-1", float, doc="x*pdf(x) for the first parton", precision=14 ),
xpdf2 = Var( "?hasPDF?pdf().xPDF.second:-1", float, doc="x*pdf(x) for the second parton", precision=14 ),
id1 = Var( "?hasPDF?pdf().id.first:-1", int, doc="id of first parton", precision=6 ),
id2 = Var( "?hasPDF?pdf().id.second:-1", int, doc="id of second parton", precision=6 ),
scalePDF = Var( "?hasPDF?pdf().scalePDF:-1", float, doc="Q2 scale for PDF", precision=14 ),
binvar = Var("?hasBinningValues()?binningValues()[0]:-1", float, doc="MC generation binning value", precision=14),
weight = Var("weight()", float,doc="MC generator weight", precision=14),
),
)
globalTablesTask = cms.Task(rhoTable)
globalTablesMCTask = cms.Task(puTable,genTable)
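# Extension sketch (hypothetical column; assumes the Var helper imported from
# common_cff keeps the (expression, type, doc=..., precision=...) signature
# used in genTable above):
#
#   qScale = Var("qScale()", float, doc="event Q scale", precision=14)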
| 65.883721 | 228 | 0.676315 |
| true | true |
f7fd9aeef918f4ae3dffe14b1cf95b479a5a7ed2 | 1,599 | py | Python | tests/test_calculation.py | esak21/learnapi_beginners | 2a80fbfe98eedcb08528c211a90b76d08628be95 | [
"Apache-2.0"
] | null | null | null | tests/test_calculation.py | esak21/learnapi_beginners | 2a80fbfe98eedcb08528c211a90b76d08628be95 | [
"Apache-2.0"
] | null | null | null | tests/test_calculation.py | esak21/learnapi_beginners | 2a80fbfe98eedcb08528c211a90b76d08628be95 | [
"Apache-2.0"
] | null | null | null | import pytest
from app.calculation import add, sub, mul, div, BankAccount, InsufficientFund
## Creating fixtures for our BankAccount class
@pytest.fixture
def zero_bank_account():
return BankAccount()
@pytest.fixture
def bank_account():
return BankAccount(50)
@pytest.mark.parametrize(
"num1, num2, result",
[ (3,3,6),
(4,2,6),
(4,5,9),
(1,3,4) ]
)
def test_add(num1, num2, result):
assert add(num1, num2) == result
def test_sub():
assert sub(9,5) == 4
def test_mul():
assert mul(1,2) == 2
def test_div():
assert div(4,2) == 2
def test_bank_set_init_amount(bank_account):
assert bank_account.balance == 50
def test_bank_default_amount(zero_bank_account):
assert zero_bank_account.balance == 0
def test_bank_withdraw_amount(bank_account):
bank_account.withdraw(50)
assert bank_account.balance == 0
def test_bank_deposit_amount(bank_account):
bank_account.deposit(10)
assert bank_account.balance == 60
def test_bank_interest_amount(bank_account):
bank_account.collect_interest()
assert round(bank_account.balance, 2) == 55.00
@pytest.mark.parametrize(
"deposited, withdraw, result",
[ (500,300,200),
(400,200,200),
(400,400,0),
(1000,300,700) ]
)
def test_bank_transaction(zero_bank_account, deposited, withdraw, result):
zero_bank_account.deposit(deposited)
zero_bank_account.withdraw(withdraw)
assert zero_bank_account.balance == result
def test_insufficient_funds(bank_account):
with pytest.raises(InsufficientFund):
bank_account.withdraw(200)
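# The assertions above pin down the API these tests expect from
# app/calculation.py; a hypothetical minimal implementation that would satisfy
# them (e.g. collect_interest() must turn 50 into 55, i.e. 10% interest):
#
#   class InsufficientFund(Exception):
#       pass
#
#   class BankAccount:
#       def __init__(self, balance=0):
#           self.balance = balance
#       def deposit(self, amount):
#           self.balance += amount
#       def withdraw(self, amount):
#           if amount > self.balance:
#               raise InsufficientFund("insufficient funds in the account")
#           self.balance -= amount
#       def collect_interest(self):
#           self.balance *= 1.1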
| 22.208333 | 78 | 0.707942 |
| true | true |
f7fd9b5849162b9798e259ab506444fc52abbb48 | 264 | py | Python | output/models/nist_data/atomic/g_year/schema_instance/nistschema_sv_iv_atomic_g_year_max_exclusive_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/g_year/schema_instance/nistschema_sv_iv_atomic_g_year_max_exclusive_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/g_year/schema_instance/nistschema_sv_iv_atomic_g_year_max_exclusive_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.atomic.g_year.schema_instance.nistschema_sv_iv_atomic_g_year_max_exclusive_2_xsd.nistschema_sv_iv_atomic_g_year_max_exclusive_2 import NistschemaSvIvAtomicGYearMaxExclusive2
__all__ = [
"NistschemaSvIvAtomicGYearMaxExclusive2",
]
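# Re-export sketch: because of __all__ above, a wildcard import pulls in just
# the one class:
#
#   from output.models.nist_data.atomic.g_year.schema_instance \
#       .nistschema_sv_iv_atomic_g_year_max_exclusive_2_xsd import *
#   obj = NistschemaSvIvAtomicGYearMaxExclusive2()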
| 44 | 202 | 0.897727 |
| true | true |
f7fd9c8d21ede9ba6de226daa3b159c437eea1ff | 1,179 | py | Python | yoongram/users/adapters.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | null | null | null | yoongram/users/adapters.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | 9 | 2021-03-09T02:00:36.000Z | 2022-02-26T10:13:36.000Z | yoongram/users/adapters.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | null | null | null | from typing import Any
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from django.conf import settings
from django.http import HttpRequest
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request: HttpRequest):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
def save_user(self, request, user, form):
if len(user.socialaccount_set.all()) == 0:
name = request.data.get('name', None)
email = request.data.get('email', None)
username = request.data.get('username', None)
password1 = request.data.get('password1', None)
password2 = request.data.get('password2', None)
user.name = name
user.email = email
user.username = username
            if password1 == password2:
user.set_password(password1)
user.save()
class SocialAccountAdapter(DefaultSocialAccountAdapter):
def is_open_for_signup(self, request: HttpRequest, sociallogin: Any):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
| 38.032258 | 73 | 0.685327 |
| true | true |
f7fd9cae1ecd0a224c3db0cf3eb8b52563e45bf4 | 2,406 | py | Python | superqt/_tests/test_eliding_label.py | alisterburt/superqt | 5ab72a0c488d36c3b3b6bd70134656358978afc8 | [
"BSD-3-Clause"
] | null | null | null | superqt/_tests/test_eliding_label.py | alisterburt/superqt | 5ab72a0c488d36c3b3b6bd70134656358978afc8 | [
"BSD-3-Clause"
] | null | null | null | superqt/_tests/test_eliding_label.py | alisterburt/superqt | 5ab72a0c488d36c3b3b6bd70134656358978afc8 | [
"BSD-3-Clause"
] | null | null | null | from superqt import QElidingLabel
from superqt.qtcompat.QtCore import QSize, Qt
from superqt.qtcompat.QtGui import QResizeEvent
TEXT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
"eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad "
"minim ven iam, quis nostrud exercitation ullamco laborisnisi ut aliquip "
"ex ea commodo consequat. Duis aute irure dolor inreprehenderit in voluptate "
"velit esse cillum dolore eu fugiat nullapariatur."
)
ELLIPSIS = "…"
def test_eliding_label(qtbot):
wdg = QElidingLabel(TEXT)
qtbot.addWidget(wdg)
assert wdg._elidedText().endswith(ELLIPSIS)
oldsize = wdg.size()
newsize = QSize(200, 20)
wdg.resize(newsize)
wdg.resizeEvent(QResizeEvent(oldsize, newsize)) # for test coverage
assert wdg.text() == TEXT
def test_wrapped_eliding_label(qtbot):
wdg = QElidingLabel(TEXT)
qtbot.addWidget(wdg)
assert not wdg.wordWrap()
assert 630 < wdg.sizeHint().width() < 635
assert wdg._elidedText() == (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
"eiusmod tempor incididunt ut labore et d…"
)
wdg.resize(QSize(200, 100))
assert wdg.text() == TEXT
assert wdg._elidedText() == "Lorem ipsum dolor sit amet, co…"
wdg.setWordWrap(True)
assert wdg.wordWrap()
assert wdg.text() == TEXT
assert wdg._elidedText() == (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
"eiusmod tempor incididunt ut labore et dolore magna aliqua. "
"Ut enim ad minim ven iam, quis nostrud exercitation ullamco la…"
)
assert wdg.sizeHint() == QSize(200, 176)
wdg.resize(wdg.sizeHint())
assert wdg._elidedText() == TEXT
def test_shorter_eliding_label(qtbot):
short = "asd a ads sd flksdf dsf lksfj sd lsdjf sd lsdfk sdlkfj s"
wdg = QElidingLabel()
qtbot.addWidget(wdg)
wdg.setText(short)
assert not wdg._elidedText().endswith(ELLIPSIS)
wdg.resize(100, 20)
assert wdg._elidedText().endswith(ELLIPSIS)
wdg.setElideMode(Qt.TextElideMode.ElideLeft)
assert wdg._elidedText().startswith(ELLIPSIS)
assert wdg.elideMode() == Qt.TextElideMode.ElideLeft
def test_wrap_text():
wrap = QElidingLabel.wrapText(TEXT, 200)
assert isinstance(wrap, list)
assert all(isinstance(x, str) for x in wrap)
assert len(wrap) == 11
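# Interactive sketch of the widget under test (needs a display; not part of
# the pytest run, and assumes superqt's qtcompat mirrors QtWidgets as it does
# QtCore/QtGui above):
#
#   from superqt.qtcompat.QtWidgets import QApplication
#   app = QApplication([])
#   label = QElidingLabel(TEXT)
#   label.resize(200, 20)  # forces the elided "…" rendering checked above
#   label.show()
#   app.exec_()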
| 34.869565 | 82 | 0.698254 |
| true | true |
f7fd9d53bb43bb4a289793d1cb06947372a35456 | 6,560 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/gcp_spanner_database_info.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/gcp_spanner_database_info.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/gcp_spanner_database_info.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database_info
description:
- Gather info for GCP Database
short_description: Gather info for GCP Database
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The instance to create the database on.
- 'This field represents a link to a Instance resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_spanner_instance task and then set this instance field to "{{ name-of-resource
}}"'
required: true
type: dict
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a database
gcp_spanner_database_info:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement,
the database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
return_value = {'resources': fetch_list(module, collection(module))}
module.exit_json(**return_value)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def fetch_list(module, link):
auth = GcpSession(module, 'spanner')
return auth.list(link, return_if_object, array_name='databases')
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| 34.166667 | 147 | 0.628811 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
| true | true |
f7fd9d72917f67c968c12a062913db529405d312 | 5,260 | py | Python | IMLearn/learners/classifiers/perceptron.py | pelegrichman/IML.HUJI | b8c158b9a6e332313a8a69cbbfd42ed8aede2858 | [
"MIT"
] | 1 | 2022-03-09T18:51:53.000Z | 2022-03-09T18:51:53.000Z | IMLearn/learners/classifiers/perceptron.py | pelegrichman/IML.HUJI | b8c158b9a6e332313a8a69cbbfd42ed8aede2858 | [
"MIT"
] | null | null | null | IMLearn/learners/classifiers/perceptron.py | pelegrichman/IML.HUJI | b8c158b9a6e332313a8a69cbbfd42ed8aede2858 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Callable, List
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from ...metrics import misclassification_error
def default_callback(fit: Perceptron, x: np.ndarray, y: int):
pass
class Perceptron(BaseEstimator):
"""
Perceptron half-space classifier
Finds a separating hyperplane for given linearly separable data.
Attributes
----------
include_intercept: bool, default = True
Should fitted model include an intercept or not
max_iter_: int, default = 1000
Maximum number of passes over training data
coefs_: ndarray of shape (n_features,) or (n_features+1,)
Coefficients vector fitted by Perceptron algorithm. To be set in
`Perceptron.fit` function.
training_loss_: array of floats
holds the loss value of the algorithm during training.
training_loss_[i] is the loss value of the i'th training iteration.
to be filled in `Perceptron.fit` function.
"""
def __init__(self,
include_intercept: bool = True,
max_iter: int = 1000,
callback: Callable[[Perceptron, np.ndarray, int], None] = default_callback):
"""
Instantiate a Perceptron classifier
Parameters
----------
include_intercept: bool, default=True
Should fitted model include an intercept or not
max_iter: int, default = 1000
Maximum number of passes over training data
callback: Callable[[Perceptron, np.ndarray, int], None]
A callable to be called after each update of the model while fitting to given data
Callable function should receive as input a Perceptron instance, current sample and current response
Attributes
----------
include_intercept_: bool
Should fitted model include an intercept or not
        max_iter_: int, default = 1000
Maximum number of passes over training data
callback_: Callable[[Perceptron, np.ndarray, int], None]
A callable to be called after each update of the model while fitting to given data
Callable function should receive as input a Perceptron instance, current sample and current response
coefs_: ndarray of shape (n_features,) or (n_features+1,)
Coefficients vector fitted by Perceptron. To be set in `Perceptron.fit` function.
"""
super().__init__()
self.include_intercept_ = include_intercept
self.max_iter_ = max_iter
self.callback_ = callback
self.coefs_ = None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
        Fit a halfspace to the given samples. Iterate over the data as long as some sample is misclassified
        and `self.max_iter_` has not been reached
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
Fits model with or without an intercept depending on value of `self.fit_intercept_`
"""
if self.include_intercept_:
X = np.c_[np.ones(len(X)), X]
# Init weights, training loss
self.coefs_: np.ndarray = np.zeros(X.shape[1])
self.training_loss_: List[float] = []
# Iterate until max_iter reach
for i in range(self.max_iter_):
# Check for misclassified sample.
misclassified_exist: bool = False
for sample, label in zip(X, y):
label_pred = np.dot(self.coefs_, sample)
if label * label_pred <= 0:
misclassified_exist = True
self.coefs_ += label * sample
self.fitted_ = True
# Update loss of current iter
self.callback_(self, sample, label)
break
            # If no misclassifications, then end the iteration.
if not misclassified_exist:
break
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
if self.include_intercept_:
X = np.c_[np.ones(len(X)), X]
return X @ self.coefs_
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
            Performance under misclassification loss function
"""
        return misclassification_error(y, self._predict(X))  # reuse _predict so the intercept column is handled consistently
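    # Usage sketch (assumes BaseEstimator exposes public fit/predict wrappers
    # delegating to the underscored methods, as the docstrings above imply):
    #
    #   rng = np.random.default_rng(0)
    #   X = rng.normal(size=(100, 2))
    #   y = np.where(X @ np.array([1.0, -2.0]) > 0, 1, -1)  # separable labels
    #   model = Perceptron(max_iter=1000)
    #   model.fit(X, y)
    #   y_hat = np.sign(model.predict(X))  # signed side of the halfspace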
| 32.469136 | 115 | 0.605894 |
| true | true |
f7fd9f26ef574e59b1be0f05d5f957a7c223c716 | 288 | py | Python | glassdoor/items.py | snahor/glassdoor-questions | 402baa0b9321441a809760c9f4d2c7e8751d33b9 | [
"Unlicense"
] | 1 | 2017-09-24T23:24:49.000Z | 2017-09-24T23:24:49.000Z | glassdoor/items.py | snahor/glassdoor-questions | 402baa0b9321441a809760c9f4d2c7e8751d33b9 | [
"Unlicense"
] | null | null | null | glassdoor/items.py | snahor/glassdoor-questions | 402baa0b9321441a809760c9f4d2c7e8751d33b9 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class GlassdoorItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
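# Field sketch, following the template comment above (hypothetical names for a
# scraped interview question):
#
#   class GlassdoorQuestionItem(scrapy.Item):
#       company = scrapy.Field()
#       question = scrapy.Field()
#       url = scrapy.Field()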
| 19.2 | 51 | 0.6875 |
| true | true |
f7fd9f656c52c9d89bd258f04ea913966b9a2ca8 | 4,110 | py | Python | app/views/notebook_view.py | awesome-archive/susnote | 402384e5de1a51ce6e41aea090376d5efef05fdc | [
"MIT"
] | null | null | null | app/views/notebook_view.py | awesome-archive/susnote | 402384e5de1a51ce6e41aea090376d5efef05fdc | [
"MIT"
] | null | null | null | app/views/notebook_view.py | awesome-archive/susnote | 402384e5de1a51ce6e41aea090376d5efef05fdc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import logging
from sanic import Blueprint, response
from sanic.response import json, text, html, redirect
import ujson
import datetime
logger = logging.getLogger('notebook')
notebook_bp = Blueprint('notebook', url_prefix='notebook')
@notebook_bp.route('/get',methods=['GET'])
async def notebooks(request):
args = request.args
limit = args.get('limit',10)
start = args.get('start',0)
order = args.get('order','desc')
notebook_id = args.get('id',-1)
notebooks = []
author_id = request['session']['author_id'] if request['session']['author_id'] else -1
if author_id <0 :
return json({'error':'illegal information'}, status=400)
sql = """select * from notebook where author_id = '%s' """ % author_id
if notebook_id >0:
sql = sql + """and article_id=%s """ % article_id
sql = sql + """order by id %s limit %s offset %s""" % (order,limit,start)
async with request.app.db.acquire() as cur:
try:
records = await cur.fetch(sql)
logger.info(sql)
except Exception as e:
logger.error(e)
return json({'error':'illegal information'}, status=400)
if records:
for record in records:
notebooks.append({
"id": record['id'],
"name": record['name'],
})
return json(notebooks)
@notebook_bp.route('/post',methods=['POST'])
async def add_notebooks(request):
data = {}
try:
data = ujson.loads(request.body)
    except Exception:
return json({'error':'illegal information'},status=400)
author_id = request['session']['author_id'] if request['session']['author_id'] else -1
if author_id <0:
return json({'error':'illegal information'}, status=400)
sql = """insert into notebook (name,author_id) values ('%s','%s')""" \
%(data.get('name'), author_id)
async with request.app.db.acquire() as cur:
try:
await cur.fetch(sql)
logger.info(sql)
except Exception as e:
logger.error(e)
return json({'error':'service error'}, status=400)
return json({'success':'success'},status=200)
@notebook_bp.route('/put',methods=['PUT'])
async def update_notebook(request):
data = {}
try:
data = ujson.loads(request.body)
    except Exception:
return json({'error':'illegal information'},status=400)
author_id = request['session']['author_id'] if request['session']['author_id'] else -1
if author_id <0:
return json({'error':'illegal information'}, status=400)
notebook_id = data.get("id") if data.get("id") else -1
    if notebook_id < 0:
return json({'error':'illegal information'},status=400)
sql = """update notebook set name='%s', author_id='%s' where id=%s""" \
%(data.get('name'),author_id,notebook_id)
async with request.app.db.acquire() as cur:
try:
await cur.fetch(sql)
logger.info(sql)
except Exception as e:
logger.error(e)
return json({'error':'service error'}, status=400)
return json({'success':'success'},status=200)
@notebook_bp.route('/delete',methods=['DELETE'])
async def delete_notebook(request):
data = {}
try:
data = ujson.loads(request.body)
    except Exception:
return json({'error':'illegal information'},status=400)
author_id = request['session']['author_id'] if request['session']['author_id'] else -1
if author_id <0:
return json({'error':'illegal information'}, status=400)
notebook_id = data.get("id") if data.get("id") else -1
    if notebook_id < 0:
return json({'error':'illegal information'},status=400)
sql = """delete from notebook where author_id='%s' and id=%s"""%(author_id,notebook_id)
async with request.app.db.acquire() as cur:
try:
await cur.fetch(sql)
logger.info(sql)
except Exception as e:
logger.error(e)
return json({'error':'service error'}, status=400)
return json({'success':'success'},status=200)
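# Hardening sketch (assumes request.app.db is an asyncpg pool, which the
# acquire()/fetch() calls above suggest): asyncpg's $n parameters avoid the
# raw string interpolation used in this module:
#
#   records = await cur.fetch(
#       "select * from notebook where author_id = $1 "
#       "order by id desc limit $2 offset $3",
#       author_id, int(limit), int(start),
#   )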
| 33.145161 | 91 | 0.606569 |
| true | true |
f7fd9f743952c0ea6b05b34d2391f181c6c94e55 | 6,201 | py | Python | app.py | ggojard/guittinywebdb | 9d90f4d6cf62e0917280e6c08398844fec71f525 | [
"MIT"
] | null | null | null | app.py | ggojard/guittinywebdb | 9d90f4d6cf62e0917280e6c08398844fec71f525 | [
"MIT"
] | null | null | null | app.py | ggojard/guittinywebdb | 9d90f4d6cf62e0917280e6c08398844fec71f525 | [
"MIT"
] | null | null | null | # from datetime import datetime
import os
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class TinyWebDB(db.Model):
__tablename__ = 'tinywebdb'
tag = db.Column(db.String, primary_key=True, nullable=False)
value = db.Column(db.String, nullable=False)
# The 'date' column is needed for deleting older entries, so not really required
# date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
db.create_all()
db.session.commit()
## COMMON functions ##########################
def store_a_value(tag, value):
if tag:
# Prevent Duplicate Key error by updating the existing tag
existing_tag = TinyWebDB.query.filter_by(tag=tag).first()
if existing_tag:
#return 'EXISTING ' + tag + ', ' + value
existing_tag.value = value
db.session.commit()
else:
#return 'NEW ' + tag + ', ' + value
data = TinyWebDB(tag=tag, value=value)
db.session.add(data)
db.session.commit()
return jsonify(['STORED', tag, value])
return 'Invalid Tag!'
def add_item_to_tag_value(tag, item):
if tag:
existing_tag = TinyWebDB.query.filter_by(tag=tag).first()
if existing_tag:
current_value = existing_tag.value
if isinstance(current_value, str):
new_value = current_value[0:len(current_value)-1]
new_value += ',' + str(item) + ']'
#return tag + ', ' + item + ', ' + current_value + ', ' + new_value
existing_tag.value = new_value
db.session.commit()
return jsonify(['ADDED', tag, new_value])
else:
return 'Invalid value format!'
return 'Invalid Tag!'
## WEB APP ##########################
@app.route('/')
def hello_world():
    return 'Hello, I\'m UP!'
@app.route('/storeavalue', methods=['POST']) #OK
def store_a_value_POST():
tag = request.form['tag']
value = request.form['value']
return store_a_value(tag, value)
@app.route('/getvalue', methods=['POST']) #OK
def get_value():
tag = request.form['tag']
    if tag:
        row = TinyWebDB.query.filter_by(tag=tag).first()  # None when the tag is unknown
        if row:
            return jsonify(['VALUE', tag, row.value])
return 'Invalid Tag!'
@app.route('/deleteentry')
def delete_entry():
# docs = db.search(User.name == 'John')
# for doc in docs:
# db.session.remove(where('value') == '')
# db.session.commit()
# return 'Empty entries have been deleted!'
return 'Not yet implemented!'
@app.route('/actionable/user/<user>') # OK
def get_scores(user):
tag = 'appinventor_user_actionable_scores_' + user #request.form['tag']
nb_play = 0
sum_play = 0
average = 0.00
    if tag:
        row = TinyWebDB.query.filter_by(tag=tag).first()  # None for an unknown user
        if row:
            value = row.value.replace("[", "").replace("]", "").split(',')
nb_play = len(value)
for v in value:
sum_play = sum_play + int(v)
nb_play = len(value)
average = format(sum_play/nb_play, '.2f')
return jsonify(['VALUE', 'nb', nb_play, 'sum', sum_play, 'average', average])
else:
return 'Invalid user: '+user
return 'User name missing: '
@app.route('/actionable/getuseraverage', methods=['POST']) #OK
def get_user_average():
user = request.form['user']
tag = 'appinventor_user_actionable_scores_' + user
nb_play = 0
sum_play = 0
average = 0.00
if tag:
        value = TinyWebDB.query.filter_by(tag=tag).first().value.replace("[", "").replace("]", "").split(',')
nb_play = len(value)
for v in value:
sum_play = sum_play + int(v)
nb_play = len(value)
average = format(sum_play/nb_play, '.2f')
return jsonify(['VALUE', 'nb', nb_play, 'sum', sum_play, 'average', average])
return 'Invalid user: '+user
@app.route('/actionable/getranking') #, methods=['GET', 'POST']) #OK if users list is good
def get_ranking():
board = []
tag = 'appinventor_user_actionable_scores_ranking'
    users = TinyWebDB.query.filter_by(tag=tag).first().value
if users:
users = users.replace("[", "").replace("]", "").replace('"', '').split(',')
for user in users:
tag = 'appinventor_user_actionable_scores_' + user
nb_play = 0
sum_play = 0
average = 0.00
        existing_tag = TinyWebDB.query.filter_by(tag=tag).first()
if existing_tag:
value = existing_tag.value
if value.find(',')>-1:
value = value.replace("[", "").replace("]", "").split(',')
nb_play = len(value)
for v in value:
sum_play = sum_play + int(v)
nb_play = len(value)
average = format(sum_play/nb_play, '.2f')
board.append([user, 'nb', nb_play, 'sum', sum_play, 'average', average])
#board.append(value)
return jsonify(board)
@app.route('/actionable/storeascore', methods=['POST']) #OK
def store_a_score():
user = request.form['user']
score = int(request.form['score'])
tag = 'appinventor_user_actionable_scores_' + user
    existing_tag = TinyWebDB.query.filter_by(tag=tag).first()
if existing_tag:
return add_item_to_tag_value(tag, score)
else:
return store_a_value(tag, '[' + str(score) + ']')
@app.route('/actionable/create/user', methods=['POST']) #OK
def actionable_create_user():
user = request.form['user']
#tag = 'appinventor_user_actionable_scores_' + user
#empty_scores = '[]'
#store_a_value(tag, empty_scores)
tag = 'appinventor_user_actionable_scores_ranking'
return add_item_to_tag_value(tag, '"'+user+'"')
if __name__ == '__main__':
app.run()
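# Client sketch (hypothetical host/port; the endpoints are defined above):
#
#   import requests
#   requests.post("http://localhost:5000/storeavalue",
#                 data={"tag": "demo", "value": "42"})
#   r = requests.post("http://localhost:5000/getvalue", data={"tag": "demo"})
#   print(r.json())  # ['VALUE', 'demo', '42']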
| 34.071429 | 110 | 0.585067 |
| true | true |
f7fda0748185d5ba88bb1fa3af9c06f1a4599a83 | 854 | py | Python | test/fixtures/generatekeyaddrvector.py | happyucjs/happyucjs-lightwallet | 5555829871cb0aade98a39483756e3bd66505c3b | [
"MIT"
] | null | null | null | test/fixtures/generatekeyaddrvector.py | happyucjs/happyucjs-lightwallet | 5555829871cb0aade98a39483756e3bd66505c3b | [
"MIT"
] | null | null | null | test/fixtures/generatekeyaddrvector.py | happyucjs/happyucjs-lightwallet | 5555829871cb0aade98a39483756e3bd66505c3b | [
"MIT"
] | null | null | null | from happyuc import tester
from happyuc import utils
from bitcoin import ecdsa_sign, ecdsa_raw_sign, ecdsa_raw_recover, decode_sig
import json
s = tester.state()
init_seed = 'some_random_initial_seed_'
indices = range(10000)
result_vector = []
for i in indices:
seed = init_seed + str(i)
key = utils.sha3(seed)
addr = utils.privtoaddr(key)
s.send(to=addr, sender=tester.k0, value=10**18)
assert (s.block.get_balance(addr) == 10**18)
s.send(to=tester.a0, sender=key, value=6*10**17)
    assert (s.block.get_balance(addr) < 4*10**17 and s.block.get_balance(addr) > 3*10**17)
result_vector.append({'seed': seed,
'key' : utils.encode_hex(key),
'addr' : utils.encode_hex(addr)})
output = json.dumps(result_vector)
outfile = file('testvector.json', 'w')  # Python 2 builtin; use open() on Python 3
outfile.write(output)
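# Read-back sketch for the generated fixture (plain json, no happyuc or
# bitcoin dependency required):
#
#   with open('testvector.json') as f:
#       vector = json.load(f)
#   assert vector[0]['seed'] == 'some_random_initial_seed_0'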
| 29.448276 | 84 | 0.662763 |
| true | true |
f7fda0be5bfa5ffa18a069db5ca8add1b4d483a4 | 11,993 | py | Python | moderngl_window/context/pyglet/window.py | joehalliwell/moderngl-window | f0241fe3a6bc058eb4d2c9d622934a168de82865 | [
"MIT"
] | 142 | 2019-11-11T23:14:28.000Z | 2022-03-29T08:37:03.000Z | moderngl_window/context/pyglet/window.py | robertdluigi/moderngl-window | b6319494a22107c4ffc64d2f50670f8cbf953667 | [
"MIT"
] | 107 | 2019-10-31T20:31:45.000Z | 2022-03-23T15:01:41.000Z | moderngl_window/context/pyglet/window.py | robertdluigi/moderngl-window | b6319494a22107c4ffc64d2f50670f8cbf953667 | [
"MIT"
] | 36 | 2019-12-12T16:14:10.000Z | 2022-01-18T22:58:21.000Z | from typing import Tuple
import platform
import pyglet
# On OS X we need to disable the shadow context
# because the 2.1 shadow context cannot be upgrade to a 3.3+ core
if platform.system() == "Darwin":
pyglet.options["shadow_window"] = False
pyglet.options["debug_gl"] = False
from moderngl_window.context.pyglet.keys import Keys # noqa: E402
from moderngl_window.context.base import BaseWindow # noqa: E402
class Window(BaseWindow):
"""
Window based on Pyglet 1.4.x
"""
#: Name of the window
name = "pyglet"
#: Pyglet specific key constants
keys = Keys
# pyglet button id -> universal button id
_mouse_button_map = {
1: 1,
4: 2,
2: 3,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
config = pyglet.gl.Config(
major_version=self.gl_version[0],
minor_version=self.gl_version[1],
forward_compatible=True,
depth_size=24,
double_buffer=True,
sample_buffers=1 if self.samples > 1 else 0,
samples=self.samples,
)
if self.fullscreen:
display = pyglet.canvas.get_display()
screen = display.get_default_screen()
self._width, self._height = screen.width, screen.height
self._window = PygletWrapper(
width=self._width,
height=self._height,
caption=self._title,
resizable=self._resizable,
vsync=self._vsync,
fullscreen=self._fullscreen,
config=config,
file_drops=True and platform.system() != "Darwin"
)
self.cursor = self._cursor
self._window.event(self.on_key_press)
self._window.event(self.on_key_release)
self._window.event(self.on_mouse_motion)
self._window.event(self.on_mouse_drag)
self._window.event(self.on_resize)
self._window.event(self.on_close)
self._window.event(self.on_mouse_press)
self._window.event(self.on_mouse_release)
self._window.event(self.on_mouse_scroll)
self._window.event(self.on_text)
self._window.event(self.on_show)
self._window.event(self.on_hide)
self._window.event(self.on_file_drop)
self.init_mgl_context()
self._buffer_width, self._buffer_height = self._window.get_framebuffer_size()
self.set_default_viewport()
def _set_fullscreen(self, value: bool) -> None:
self._window.set_fullscreen(value)
@property
def size(self) -> Tuple[int, int]:
"""Tuple[int, int]: current window size.
This property also support assignment::
# Resize the window to 1000 x 1000
window.size = 1000, 1000
"""
return self._width, self._height
@size.setter
def size(self, value: Tuple[int, int]):
self._window.set_size(value[0], value[1])
@property
def position(self) -> Tuple[int, int]:
"""Tuple[int, int]: The current window position.
This property can also be set to move the window::
# Move window to 100, 100
window.position = 100, 100
"""
return self._window.get_location()
@position.setter
def position(self, value: Tuple[int, int]):
self._window.set_location(value[0], value[1])
@property
def cursor(self) -> bool:
"""bool: Should the mouse cursor be visible inside the window?
This property can also be assigned to::
# Disable cursor
window.cursor = False
"""
return self._cursor
@cursor.setter
def cursor(self, value: bool):
self._window.set_mouse_visible(value)
self._cursor = value
@property
def mouse_exclusivity(self) -> bool:
"""bool: If mouse exclusivity is enabled.
When you enable mouse-exclusive mode, the mouse cursor is no longer
available. It is not merely hidden – no amount of mouse movement
will make it leave your application. This is for example useful
when you don't want the mouse leaving the screen when rotating
a 3d scene.
This property can also be set::
window.mouse_exclusivity = True
"""
return self._mouse_exclusivity
@mouse_exclusivity.setter
def mouse_exclusivity(self, value: bool):
self._window.set_exclusive_mouse(value)
self._mouse_exclusivity = value
@property
def title(self) -> str:
"""str: Window title.
This property can also be set::
window.title = "New Title"
"""
return self._title
@title.setter
def title(self, value: str):
self._window.set_caption(value)
self._title = value
@property
def is_closing(self) -> bool:
"""Check pyglet's internal exit state"""
return self._window.has_exit or super().is_closing
@is_closing.setter
def is_closing(self, value: bool):
self._close = value
def close(self) -> None:
"""Close the pyglet window directly"""
self.is_closing = True
self._window.close()
super().close()
def swap_buffers(self) -> None:
"""Swap buffers, increment frame counter and pull events"""
self._window.flip()
self._frames += 1
self._window.dispatch_events()
def _handle_modifiers(self, mods):
"""Update key modifier states"""
self._modifiers.shift = mods & 1 == 1
self._modifiers.ctrl = mods & 2 == 2
self._modifiers.alt = mods & 4 == 4
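# Worked example of the bit tests above (pyglet's MOD_SHIFT=1, MOD_CTRL=2,
# MOD_ALT=4): mods == 5 gives 5 & 1 == 1 (shift held), 5 & 2 == 0 (no ctrl)
# and 5 & 4 == 4 (alt held), i.e. a shift+alt combination.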
def _set_icon(self, icon_path: str) -> None:
icon = pyglet.image.load(icon_path)
self._window.set_icon(icon)
def on_key_press(self, symbol, modifiers):
"""Pyglet specific key press callback.
Forwards and translates the events to the standard methods.
Args:
symbol: The symbol of the pressed key
modifiers: Modifier state (shift, ctrl etc.)
"""
if self._exit_key is not None and symbol == self._exit_key:
self.close()
if self._fs_key is not None and symbol == self._fs_key:
self.fullscreen = not self.fullscreen
self._key_pressed_map[symbol] = True
self._handle_modifiers(modifiers)
self._key_event_func(symbol, self.keys.ACTION_PRESS, self._modifiers)
return pyglet.event.EVENT_HANDLED
def on_text(self, text):
"""Pyglet specific text input callback
Forwards and translates the events to the standard methods.
Args:
text (str): The unicode character entered
"""
self._unicode_char_entered_func(text)
def on_key_release(self, symbol, modifiers):
"""Pyglet specific key release callback.
Forwards and translates the events to standard methods.
Args:
symbol: The symbol of the pressed key
modifiers: Modifier state (shift, ctrl etc.)
"""
self._key_pressed_map[symbol] = False
self._handle_modifiers(modifiers)
self._key_event_func(symbol, self.keys.ACTION_RELEASE, self._modifiers)
def on_mouse_motion(self, x, y, dx, dy):
"""Pyglet specific mouse motion callback.
Forwards and translates the event to the standard methods.
Args:
x: x position of the mouse
y: y position of the mouse
dx: delta x position
dy: delta y position of the mouse
"""
# NOTE: Screen coordinates relative to the lower-left corner
# so we have to flip the y axis to make this consistent with
# other window libraries
self._mouse_position_event_func(x, self._height - y, dx, -dy)
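# Hedged illustration of the flip (window height assumed to be 600 px): a
# pyglet event at (x=10, y=590, dx=0, dy=+5) near the top edge is forwarded
# as (10, 600 - 590, 0, -5) == (10, 10, 0, -5), i.e. top-left-origin
# coordinates with downward-positive dy.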
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""Pyglet specific mouse drag event.
When a mouse button is pressed this is the only way
to capture mouse position events
"""
self._handle_modifiers(modifiers)
self._mouse_drag_event_func(x, self._height - y, dx, -dy)
def on_mouse_press(self, x: int, y: int, button, mods):
"""Handle mouse press events and forward to standard methods
Args:
x: x position of the mouse when pressed
y: y position of the mouse when pressed
button: The pressed button
mods: Modifiers
"""
self._handle_modifiers(mods)
button = self._mouse_button_map.get(button, None)
if button is not None:
self._handle_mouse_button_state_change(button, True)
self._mouse_press_event_func(
x, self._height - y, button,
)
def on_mouse_release(self, x: int, y: int, button, mods):
"""Handle mouse release events and forward to standard methods
Args:
x: x position when mouse button was released
y: y position when mouse button was released
button: The button pressed
mods: Modifiers
"""
button = self._mouse_button_map.get(button, None)
if button is not None:
self._handle_mouse_button_state_change(button, False)
self._mouse_release_event_func(
x, self._height - y, button,
)
def on_mouse_scroll(self, x, y, x_offset: float, y_offset: float):
"""Handle mouse wheel.
Args:
x_offset (float): X scroll offset
y_offset (float): Y scroll offset
"""
self._handle_modifiers(0) # No modifiers available
self.mouse_scroll_event_func(x_offset, y_offset)
def on_resize(self, width: int, height: int):
"""Pyglet specific callback for window resize events forwarding to standard methods
Args:
width: New window width
height: New window height
"""
self._width, self._height = width, height
self._buffer_width, self._buffer_height = self._window.get_framebuffer_size()
self.set_default_viewport()
super().resize(self._buffer_width, self._buffer_height)
def on_close(self):
"""Pyglet specific window close callback"""
self._close_func()
def on_show(self):
"""Called when window first appear or restored from hidden state"""
self._iconify_func(False)
def on_hide(self):
"""Called when window is minimized"""
self._iconify_func(True)
def on_file_drop(self, x, y, paths):
"""Called when files dropped onto the window
Args:
x (int): X location in window where file was dropped
y (int): Y location in window where file was dropped
paths (list): List of file paths dropped
"""
# pyglet coordinate origin is in the bottom left corner of the window
# mglw coordinate origin is in the top left corner of the window
# convert pyglet coordinates to mglw coordinates:
(x, y) = self.convert_window_coordinates(x, y, y_flipped=True)
self._files_dropped_event_func(x, y, paths)
def destroy(self):
"""Destroy the pyglet window"""
pass
class PygletWrapper(pyglet.window.Window):
"""Block out some window methods so pyglet don't trigger GL errors"""
def on_resize(self, width, height):
"""Block out the resize method.
For some reason pyglet calls this, triggering errors.
"""
pass
def on_draw(self):
"""Block out the default draw method to avoid GL errors"""
pass
| 32.76776 | 92 | 0.600267 |
from typing import Tuple
import platform
import pyglet
if platform.system() == "Darwin":
pyglet.options["shadow_window"] = False
pyglet.options["debug_gl"] = False
from moderngl_window.context.pyglet.keys import Keys
from moderngl_window.context.base import BaseWindow
class Window(BaseWindow):
name = "pyglet"
keys = Keys
_mouse_button_map = {
1: 1,
4: 2,
2: 3,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
config = pyglet.gl.Config(
major_version=self.gl_version[0],
minor_version=self.gl_version[1],
forward_compatible=True,
depth_size=24,
double_buffer=True,
sample_buffers=1 if self.samples > 1 else 0,
samples=self.samples,
)
if self.fullscreen:
display = pyglet.canvas.get_display()
screen = display.get_default_screen()
self._width, self._height = screen.width, screen.height
self._window = PygletWrapper(
width=self._width,
height=self._height,
caption=self._title,
resizable=self._resizable,
vsync=self._vsync,
fullscreen=self._fullscreen,
config=config,
file_drops=True and platform.system() != "Darwin"
)
self.cursor = self._cursor
self._window.event(self.on_key_press)
self._window.event(self.on_key_release)
self._window.event(self.on_mouse_motion)
self._window.event(self.on_mouse_drag)
self._window.event(self.on_resize)
self._window.event(self.on_close)
self._window.event(self.on_mouse_press)
self._window.event(self.on_mouse_release)
self._window.event(self.on_mouse_scroll)
self._window.event(self.on_text)
self._window.event(self.on_show)
self._window.event(self.on_hide)
self._window.event(self.on_file_drop)
self.init_mgl_context()
self._buffer_width, self._buffer_height = self._window.get_framebuffer_size()
self.set_default_viewport()
def _set_fullscreen(self, value: bool) -> None:
self._window.set_fullscreen(value)
@property
def size(self) -> Tuple[int, int]:
return self._width, self._height
@size.setter
def size(self, value: Tuple[int, int]):
self._window.set_size(value[0], value[1])
@property
def position(self) -> Tuple[int, int]:
return self._window.get_location()
@position.setter
def position(self, value: Tuple[int, int]):
self._window.set_location(value[0], value[1])
@property
def cursor(self) -> bool:
return self._cursor
@cursor.setter
def cursor(self, value: bool):
self._window.set_mouse_visible(value)
self._cursor = value
@property
def mouse_exclusivity(self) -> bool:
return self._mouse_exclusivity
@mouse_exclusivity.setter
def mouse_exclusivity(self, value: bool):
self._window.set_exclusive_mouse(value)
self._mouse_exclusivity = value
@property
def title(self) -> str:
return self._title
@title.setter
def title(self, value: str):
self._window.set_caption(value)
self._title = value
@property
def is_closing(self) -> bool:
return self._window.has_exit or super().is_closing
@is_closing.setter
def is_closing(self, value: bool):
self._close = value
def close(self) -> None:
self.is_closing = True
self._window.close()
super().close()
def swap_buffers(self) -> None:
self._window.flip()
self._frames += 1
self._window.dispatch_events()
def _handle_modifiers(self, mods):
self._modifiers.shift = mods & 1 == 1
self._modifiers.ctrl = mods & 2 == 2
self._modifiers.alt = mods & 4 == 4
def _set_icon(self, icon_path: str) -> None:
icon = pyglet.image.load(icon_path)
self._window.set_icon(icon)
def on_key_press(self, symbol, modifiers):
if self._exit_key is not None and symbol == self._exit_key:
self.close()
if self._fs_key is not None and symbol == self._fs_key:
self.fullscreen = not self.fullscreen
self._key_pressed_map[symbol] = True
self._handle_modifiers(modifiers)
self._key_event_func(symbol, self.keys.ACTION_PRESS, self._modifiers)
return pyglet.event.EVENT_HANDLED
def on_text(self, text):
self._unicode_char_entered_func(text)
def on_key_release(self, symbol, modifiers):
self._key_pressed_map[symbol] = False
self._handle_modifiers(modifiers)
self._key_event_func(symbol, self.keys.ACTION_RELEASE, self._modifiers)
def on_mouse_motion(self, x, y, dx, dy):
self._mouse_position_event_func(x, self._height - y, dx, -dy)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self._handle_modifiers(modifiers)
self._mouse_drag_event_func(x, self._height - y, dx, -dy)
def on_mouse_press(self, x: int, y: int, button, mods):
self._handle_modifiers(mods)
button = self._mouse_button_map.get(button, None)
if button is not None:
self._handle_mouse_button_state_change(button, True)
self._mouse_press_event_func(
x, self._height - y, button,
)
def on_mouse_release(self, x: int, y: int, button, mods):
button = self._mouse_button_map.get(button, None)
if button is not None:
self._handle_mouse_button_state_change(button, False)
self._mouse_release_event_func(
x, self._height - y, button,
)
def on_mouse_scroll(self, x, y, x_offset: float, y_offset: float):
self._handle_modifiers(0)
self.mouse_scroll_event_func(x_offset, y_offset)
def on_resize(self, width: int, height: int):
self._width, self._height = width, height
self._buffer_width, self._buffer_height = self._window.get_framebuffer_size()
self.set_default_viewport()
super().resize(self._buffer_width, self._buffer_height)
def on_close(self):
self._close_func()
def on_show(self):
self._iconify_func(False)
def on_hide(self):
self._iconify_func(True)
def on_file_drop(self, x, y, paths):
(x, y) = self.convert_window_coordinates(x, y, y_flipped=True)
self._files_dropped_event_func(x, y, paths)
def destroy(self):
pass
class PygletWrapper(pyglet.window.Window):
def on_resize(self, width, height):
pass
def on_draw(self):
pass
| true | true |
f7fda1731af86be37b8c5e999eff6e24d03104fc | 6,822 | py | Python | Twitter-Data-Analysis/twitter_streaming.py | vivekparasharr/Social-Text-Web-Data-Analysis | 9c19edc2a2917454b558ee7e4464e0c41418b6cd | [
"MIT"
] | null | null | null | Twitter-Data-Analysis/twitter_streaming.py | vivekparasharr/Social-Text-Web-Data-Analysis | 9c19edc2a2917454b558ee7e4464e0c41418b6cd | [
"MIT"
] | null | null | null | Twitter-Data-Analysis/twitter_streaming.py | vivekparasharr/Social-Text-Web-Data-Analysis | 9c19edc2a2917454b558ee7e4464e0c41418b6cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 28 12:25:54 2017
@author: vivekparashar
"""
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
#Variables that contains the user credentials to access Twitter API
access_token = "----"
access_token_secret = "----"
consumer_key = "----"
consumer_secret = "----"
#import csv
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
if 'text' in data:
file = open('/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.txt', 'a')
file.write(data)
file.close()
#with open('/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.csv', 'w') as csvfile:
# tweetwriter = csv.writer(csvfile)
# tweetwriter.writerow([data])
#print (data)
return True
def on_error(self, status):
#print (status)
return True
if __name__ == '__main__':
#This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
#This line filters Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'
#stream.filter(track=['python', 'javascript', 'ruby'])
#This line filters Twitter Streams to capture data by the keyword: 'trump'
stream.filter(track=['trump'])
import json
import pandas as pd
import matplotlib.pyplot as plt
tweets_data_path = '/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
#print the number of tweets
print (len(tweets_data))
import numpy as np
for i in np.arange(len(tweets_data)):
print(tweets_data[i]['text'])
#structure the tweets data into a pandas DataFrame
#start by creating an empty DataFrame called tweets
tweets = pd.DataFrame()
tweets.loc[0:1,:] # print out first 2 tweets
tweets.loc[0:1,'text'] # print out first 2 tweets, but only text
#add 3 columns to the tweets DataFrame called text, lang, and country. text column contains the tweet
tweets['text'] = list(map(lambda tweet: tweet.get('text', ''), tweets_data))
tweets['text'] = list(map(lambda tweet: tweet['text'], tweets_data))
tweets['lang'] = list(map(lambda tweet: tweet['lang'], tweets_data))
#'country' is needed for the country chart below; .get avoids a KeyError for tweets without a top-level 'type'
tweets['country'] = list(map(lambda tweet: tweet['place']['country'] if tweet['place'] is not None else None, tweets_data))
tweets['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'], tweets_data))
tweets['type'] = list(map(lambda tweet: tweet.get('type'), tweets_data))
"""
How about:
tweets['text'] = map(lambda tweet: tweet.get('text', ''), tweets_data)
The dictionary method .get() returns None by default for a missing key, but the default can be any object.
In this case tweet['text'] is expected to be a string, so substituting an empty string for a missing
value is a reasonable default.
"""
#create 2 charts:
#The first one describes the Top 5 languages in which the tweets were written
tweets_by_lang = tweets['lang'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Languages', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('Top 5 languages', fontsize=15, fontweight='bold')
tweets_by_lang[:5].plot(ax=ax, kind='bar', color='red')
#The second describes the Top 5 countries from which the tweets were sent
tweets_by_country = tweets['country'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Countries', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('Top 5 countries', fontsize=15, fontweight='bold')
tweets_by_country[:5].plot(ax=ax, kind='bar', color='blue')
#new vs retweet
#The third shows the breakdown of tweet types
retweet_count = tweets['type'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Tweet type', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('tweet type - photo, video, etc.', fontsize=15, fontweight='bold')
retweet_count[:3].plot(ax=ax, kind='bar', color='blue')
'''
You usually run across the KeyError when Python cannot find a specified key. This is often the case with JSON generated by the Twitter API that certain fields/keys will not be present for some tweets.
Instead of :
tweets['text'] = map(lambda tweet: tweet['text'], tweets_data)
Replace this with:
tweets['text'] = map(lambda tweet: tweet.get('text', None),tweets_data)
Similarly, say you are looking for a key that is nested two or more levels deep, you can chain multiple .get() functions like below.
tweets['child'] = map(lambda tweet: tweet.get('grandparent', {}).get('parent', {}).get('child') , tweets_data)
A more specific example:
tweets['user'] = map(lambda tweet: tweet.get('user', {}).get('name'),tweets_data)
'''
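#Runnable sketch of the chained .get() pattern quoted above (toy data, not
#real tweets): missing intermediate keys fall back to {} instead of raising.
toy_tweets = [{'user': {'name': 'alice'}}, {}]
print(list(map(lambda t: t.get('user', {}).get('name'), toy_tweets))) # ['alice', None]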
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 3 18:43:16 2017
@author: vivekparashar
"""
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
#Variables that contains the user credentials to access Twitter API
access_token = "----"
access_token_secret = "----"
consumer_key = "----"
consumer_secret = "----"
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
print (data)
return True
def on_error(self, status):
print (status)
if __name__ == '__main__':
#This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
#This line filters Twitter Streams to capture data by the keyword: 'trump'
stream.filter(track=['trump'])
import json
import pandas as pd
import matplotlib.pyplot as plt
tweets_data_path = '../twitter_data.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
| 30.052863 | 200 | 0.70903 |
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
access_token = "----"
access_token_secret = "----"
consumer_key = "----"
consumer_secret = "----"
class StdOutListener(StreamListener):
def on_data(self, data):
if 'text' in data:
file = open('/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.txt', 'a')
file.write(data)
file.close()
return True
def on_error(self, status):
return True
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=['trump'])
import json
import pandas as pd
import matplotlib.pyplot as plt
tweets_data_path = '/Users/vivekparashar/Documents/python dsp jupyter notebook/trump_data2.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
print (len(tweets_data))
import numpy as np
for i in np.arange(len(tweets_data)):
print(tweets_data[i]['text'])
tweets = pd.DataFrame()
tweets.loc[0:1,:]
tweets.loc[0:1,'text']
tweets['text'] = list(map(lambda tweet: tweet.get('text', ''), tweets_data))
tweets['text'] = list(map(lambda tweet: tweet['text'], tweets_data))
tweets['lang'] = list(map(lambda tweet: tweet['lang'], tweets_data))
tweets['retweet_count'] = list(map(lambda tweet: tweet['retweet_count'], tweets_data))
tweets['country'] = list(map(lambda tweet: tweet['place']['country'] if tweet['place'] is not None else None, tweets_data))
tweets['type'] = list(map(lambda tweet: tweet.get('type'), tweets_data))
tweets_by_lang = tweets['lang'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Languages', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('Top 5 languages', fontsize=15, fontweight='bold')
tweets_by_lang[:5].plot(ax=ax, kind='bar', color='red')
tweets_by_country = tweets['country'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Countries', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('Top 5 countries', fontsize=15, fontweight='bold')
tweets_by_country[:5].plot(ax=ax, kind='bar', color='blue')
retweet_count = tweets['type'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Tweet type', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('tweet type - photo, video, etc.', fontsize=15, fontweight='bold')
retweet_count[:3].plot(ax=ax, kind='bar', color='blue')
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
access_token = "----"
access_token_secret = "----"
consumer_key = "----"
consumer_secret = "----"
class StdOutListener(StreamListener):
def on_data(self, data):
print (data)
return True
def on_error(self, status):
print (status)
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=['trump'])
import json
import pandas as pd
import matplotlib.pyplot as plt
tweets_data_path = '../twitter_data.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
| true | true |
f7fda40d65049b9d477fa2a0a59bf4be27ea8b97 | 14,441 | py | Python | uqcsbot/scripts/trivia.py | dhood/uqcsbot | d36fb77a848d17d91ac8bd4c1b85000bac06f7c0 | [
"MIT"
] | 38 | 2017-04-13T01:15:05.000Z | 2021-07-25T08:22:20.000Z | uqcsbot/scripts/trivia.py | dhood/uqcsbot | d36fb77a848d17d91ac8bd4c1b85000bac06f7c0 | [
"MIT"
] | 302 | 2017-04-13T01:20:26.000Z | 2021-04-06T07:08:29.000Z | uqcsbot/scripts/trivia.py | dhood/uqcsbot | d36fb77a848d17d91ac8bd4c1b85000bac06f7c0 | [
"MIT"
] | 54 | 2017-05-04T23:18:29.000Z | 2021-04-07T17:57:16.000Z | import argparse
import base64
import json
import random
from datetime import datetime, timezone, timedelta
from functools import partial
from typing import List, Dict, Union, NamedTuple, Optional, Callable, Set
import requests
from uqcsbot import bot, Command
from uqcsbot.api import Channel
from uqcsbot.utils.command_utils import loading_status, UsageSyntaxException
API_URL = "https://opentdb.com/api.php"
CATEGORIES_URL = "https://opentdb.com/api_category.php"
# NamedTuple for use with the data returned from the api
QuestionData = NamedTuple('QuestionData',
[('type', str), ('question', str), ('correct_answer', str),
('answers', List[str]), ('is_boolean', bool)])
# Contains information about a reaction and the list of users who used said reaction
ReactionUsers = NamedTuple('ReactionUsers', [('name', str), ('users', Set[str])])
# Customisation options
# The interval between reactions being made for the possible answers (prevents order changing)
REACT_INTERVAL = 1
MIN_SECONDS = 5
MAX_SECONDS = 300
# The channels where multiple trivia questions can be asked (prevent spam)
VALID_SEQUETIAL_CHANNELS = ['trivia', 'bot-testing']
MAX_SEQUENTIAL_QUESTIONS = 30
BOOLEAN_REACTS = ['this', 'not-this'] # Format of [ <True>, <False> ]
# Colours should match CHOICE_COLORS
MULTIPLE_CHOICE_REACTS = ['green_heart', 'yellow_heart', 'heart', 'blue_heart']
CHOICE_COLORS = ['#6C9935', '#F3C200', '#B6281E', '#3176EF']
# What arguments to use for the cron job version
CRON_CHANNEL = 'trivia'
# (One day - 15 seconds) Overrides any -s argument below and ignores MAX_SECONDS rule
CRON_SECONDS = 86385
CRON_ARGUMENTS = ''
@bot.on_command('trivia')
@loading_status
def handle_trivia(command: Command):
"""
`!trivia [-d <easy|medium|hard>] [-c <CATEGORY>]
[-t <multiple|boolean>] [-s <N>] [-n <N>] [--cats]`
- Asks a new trivia question
"""
args = parse_arguments(command.channel_id, command.arg if command.has_arg() else '')
# End early if the help option was used
if args.help:
return
# Send the possible categories
if args.cats:
bot.post_message(command.channel_id, get_categories())
return
# Check if the channel is valid for sequential questions
current_channel = bot.channels.get(command.channel_id)
if all([args.count > 1, not current_channel.is_im,
current_channel.name not in VALID_SEQUETIAL_CHANNELS]):
# If no valid channels are specified
if len(VALID_SEQUETIAL_CHANNELS) == 0:
bot.post_message(command.channel_id,
'This command can only be used in private messages with the bot')
return
first_valid = bot.channels.get(VALID_SEQUETIAL_CHANNELS[0])
channel_message = ''
if first_valid:
channel_message = f'Try <#{first_valid.id}|{VALID_SEQUETIAL_CHANNELS[0]}>.'
bot.post_message(command.channel_id, f'You cannot use the sequential questions '
+ f'feature in this channel. {channel_message}')
return
handle_question(command.channel_id, args)
def parse_arguments(channel: Channel, arg_string: str) -> argparse.Namespace:
"""
Parses the arguments for the command
:param channel: The channel to post the help message to when requested
:param arg_string: The raw argument string passed to the !trivia command
:return: An argparse Namespace object with the parsed arguments
"""
parser = argparse.ArgumentParser(prog='!trivia', add_help=False)
def usage_error(*args, **kwargs):
raise UsageSyntaxException()
parser.error = usage_error # type: ignore
parser.add_argument('-d', '--difficulty', choices=['easy', 'medium', 'hard'],
default='random', type=str.lower,
help='The difficulty of the question. (default: %(default)s)')
parser.add_argument('-c', '--category', default=-1, type=int,
help='Specifies a category (default: any)')
parser.add_argument('-t', '--type', choices=['boolean', 'multiple'],
default="random", type=str.lower,
help='The type of question. (default: %(default)s)')
parser.add_argument('-s', '--seconds', default=30, type=int,
help='Number of seconds before posting answer (default: %(default)s)')
parser.add_argument('-n', '--count', default=1, type=int, help=f"Do 'n' trivia questions in "
f"quick succession (max : {MAX_SEQUENTIAL_QUESTIONS})")
parser.add_argument('--cats', action='store_true',
help='Sends a list of valid categories to the user')
parser.add_argument('-h', '--help', action='store_true', help='Prints this help message')
args = parser.parse_args(arg_string.split())
# If the help option was used print the help message to
# the channel (needs access to the parser to do this)
if args.help:
bot.post_message(channel, parser.format_help())
# Constrain the number of seconds to a reasonable frame
args.seconds = max(MIN_SECONDS, args.seconds)
args.seconds = min(args.seconds, MAX_SECONDS)
# Constrain the number of sequential questions
args.count = max(args.count, 1)
args.count = min(args.count, MAX_SEQUENTIAL_QUESTIONS)
# Add an original count to keep track
args.original_count = args.count
return args
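# Hedged usage sketch (hypothetical channel id): parse_arguments('C123', '-d easy -n 3')
# yields a Namespace with difficulty='easy', count=3 (and original_count=3),
# and seconds clamped into the [MIN_SECONDS, MAX_SECONDS] range.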
def get_categories() -> str:
"""
Gets the message to send if the user wants a list of the available categories.
"""
http_response = requests.get(CATEGORIES_URL)
if http_response.status_code != requests.codes.ok:
return "There was a problem getting the response"
categories = json.loads(http_response.content)['trivia_categories']
# Construct pretty results to print in a code block to avoid a large spammy message
pretty_results = '```Use the id to specify a specific category \n\nID Name\n'
for category in categories:
pretty_results += f'{category["id"]:<4d}{category["name"]}\n'
pretty_results += '```'
return pretty_results
def handle_question(channel: Channel, args: argparse.Namespace):
"""
Handles getting a question and posting it to the channel as well as scheduling the answer.
Also schedules the next question when more than one was requested.
"""
question_data = get_question_data(channel, args)
if question_data is None:
return
question_number = args.original_count - args.count + 1
prefix = f'Q{question_number}:' if args.original_count > 1 else ''
post_question(channel, question_data, prefix)
# Get the answer message
if question_data.is_boolean:
if question_data.correct_answer == 'True':
answer_text = f':{BOOLEAN_REACTS[0]}:'
else:
answer_text = f':{BOOLEAN_REACTS[1]}:'
else:
answer_text = question_data.correct_answer
answer_message = f'The answer to the question *{question_data.question}* is: *{answer_text}*'
# Schedule the answer to be posted after the specified number of seconds has passed
post_answer = partial(bot.post_message, channel, answer_message)
schedule_action(post_answer, args.seconds)
# If more questions are to be asked schedule the question for 5 seconds after the current answer
if args.count > 1:
args.count -= 1
schedule_action(partial(handle_question, channel, args), args.seconds + 5)
def get_question_data(channel: Channel, args: argparse.Namespace) -> Optional[QuestionData]:
"""
Attempts to get a question from the api using the specified arguments.
Returns the dictionary object for the question on success
and None on failure (after posting an error message).
"""
# Base64 to help with encoding the message for slack
params: Dict[str, Union[int, str]] = {'amount': 1, 'encode': 'base64'}
# Add in any explicitly specified arguments
if args.category != -1:
params['category'] = args.category
if args.difficulty != 'random':
params['difficulty'] = args.difficulty
if args.type != 'random':
params['type'] = args.type
# Get the response and check that it is valid
http_response = requests.get(API_URL, params=params)
if http_response.status_code != requests.codes.ok:
bot.post_message(channel, "There was a problem getting the response")
return None
# Check the response codes and post a useful message in the case of an error
response_content = json.loads(http_response.content)
if response_content['response_code'] == 2:
bot.post_message(channel, "Invalid category id. "
+ "Try !trivia --cats for a list of valid categories.")
return None
elif response_content['response_code'] != 0:
bot.post_message(channel, "No results were returned")
return None
question_data = response_content['results'][0]
# Get the type of question and make the NamedTuple container for the data
is_boolean = len(question_data['incorrect_answers']) == 1
answers = [question_data['correct_answer']] + question_data['incorrect_answers']
# Delete the ones we don't need
del question_data['category']
del question_data['difficulty']
del question_data['incorrect_answers']
# Decode the ones we want. The base 64 decoding ensures
# that the formatting works properly with slack.
question_data['question'] = decode_b64(question_data['question'])
question_data['correct_answer'] = decode_b64(question_data['correct_answer'])
answers = [decode_b64(ans) for ans in answers]
question_data = QuestionData(is_boolean=is_boolean, answers=answers, **question_data)
# Shuffle the answers
random.shuffle(question_data.answers)
return question_data
def post_question(channel: Channel, question_data: QuestionData, prefix: str = '') -> float:
"""
Posts the question from the given QuestionData along with
the possible answers list if applicable.
Also creates the answer reacts.
Returns the timestamp of the posted message.
"""
# Post the question and get the timestamp for the reactions (asterisks bold it)
message_ts = bot.post_message(channel, f'*{prefix} {question_data.question}*')['ts']
# Print the questions (if multiple choice) and add the answer reactions
reactions = BOOLEAN_REACTS if question_data.is_boolean else MULTIPLE_CHOICE_REACTS
if not question_data.is_boolean:
message_ts = post_possible_answers(channel, question_data.answers)
add_reactions_interval(reactions, channel, message_ts, REACT_INTERVAL)
return message_ts
def add_reactions_interval(reactions: List[str], channel: Channel,
msg_timestamp: str, interval: float = 1):
"""
Adds the given reactions with "interval" seconds between in order
to prevent them from changing order in slack (as slack uses the
timestamp of when the reaction was added to determine the order).
:param reactions: The reactions to add
:param channel: The channel containing the desired message to react to
:param msg_timestamp: The timestamp of the required message
:param interval: The interval between posting each reaction (defaults to 1 second)
"""
# If the react interval is 0 don't do any of the scheduling stuff
if REACT_INTERVAL == 0:
for reaction in reactions:
bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
return
# Do the first one immediately
bot.api.reactions.add(name=reactions[0], channel=channel, timestamp=msg_timestamp)
# I am not 100% sure why this is needed. Doing it with a normal partial or
# lambda will try to post the same reacts
def add_reaction(reaction: str):
bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
for index, reaction in enumerate(reactions[1:]):
delay = (index + 1) * interval
schedule_action(partial(add_reaction, reaction), delay)
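# Worked example of the schedule above (interval=1s, reactions A, B, C):
# A is added immediately, B at t=1s and C at t=2s, so slack's timestamp-based
# reaction ordering matches the intended answer order.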
def decode_b64(encoded: str) -> str:
"""
Takes a base64 encoded string. Returns the decoded version to utf-8.
"""
return base64.b64decode(encoded).decode('utf-8')
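# Quick self-check of decode_b64 (example string is ours, not from the API;
# the API's encode=base64 mode returns payloads of this shape):
assert decode_b64('V2hhdCBpcyAyKzI/') == 'What is 2+2?'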
def get_correct_reaction(question_data: QuestionData):
"""
Returns the reaction that matches with the correct answer
"""
if question_data.is_boolean:
if question_data.correct_answer == 'True':
correct_reaction = BOOLEAN_REACTS[0]
else:
correct_reaction = BOOLEAN_REACTS[1]
else:
correct_reaction = MULTIPLE_CHOICE_REACTS[
question_data.answers.index(question_data.correct_answer)]
return correct_reaction
def post_possible_answers(channel: Channel, answers: List[str]) -> float:
"""
Posts the possible answers for a multiple choice question in a nice way.
Returns the timestamp of the message to allow reacting to it.
"""
attachments = []
for col, answer in zip(CHOICE_COLORS, answers):
ans_att = {'text': answer, 'color': col}
attachments.append(ans_att)
return bot.post_message(channel, '', attachments=attachments)['ts']
def schedule_action(action: Callable, secs: Union[int, float]):
"""
Schedules the supplied action to be called once in the given number of seconds.
"""
run_date = datetime.now(timezone(timedelta(hours=10))) + timedelta(seconds=secs)
bot._scheduler.add_job(action, 'date', run_date=run_date)
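# Hedged usage sketch (hypothetical message): schedule a post 30 seconds out:
# schedule_action(partial(bot.post_message, channel, "Time is up!"), 30)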
@bot.on_schedule('cron', hour=12, timezone='Australia/Brisbane')
def daily_trivia():
"""
Adds a job that displays a random question to the specified channel at lunch time
"""
channel = bot.channels.get(CRON_CHANNEL).id
# Get arguments and update the seconds
args = parse_arguments(channel, CRON_ARGUMENTS)
args.seconds = CRON_SECONDS
# Get and post the actual question
handle_question(channel, args)
# Format a nice message to tell when the answer will be
hours = CRON_SECONDS // 3600
minutes = (CRON_SECONDS - (hours * 3600)) // 60
if minutes > 55:
hours += 1
minutes = 0
time_until_answer = 'Answer in '
if hours > 0:
time_until_answer += f'{hours} hours'
if minutes > 0:
time_until_answer += f' and {minutes} minutes' if hours > 0 else f'{minutes} minutes'
bot.post_message(channel, time_until_answer)
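# Worked example of the arithmetic above with CRON_SECONDS = 86385:
# hours = 86385 // 3600 = 23 and minutes = (86385 - 23 * 3600) // 60 = 59;
# since 59 > 55 this rounds up to hours = 24, minutes = 0, so the channel
# sees "Answer in 24 hours".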
| 38.30504 | 100 | 0.685202 |
import argparse
import base64
import json
import random
from datetime import datetime, timezone, timedelta
from functools import partial
from typing import List, Dict, Union, NamedTuple, Optional, Callable, Set
import requests
from uqcsbot import bot, Command
from uqcsbot.api import Channel
from uqcsbot.utils.command_utils import loading_status, UsageSyntaxException
API_URL = "https://opentdb.com/api.php"
CATEGORIES_URL = "https://opentdb.com/api_category.php"
QuestionData = NamedTuple('QuestionData',
[('type', str), ('question', str), ('correct_answer', str),
('answers', List[str]), ('is_boolean', bool)])
ReactionUsers = NamedTuple('ReactionUsers', [('name', str), ('users', Set[str])])
REACT_INTERVAL = 1
MIN_SECONDS = 5
MAX_SECONDS = 300
VALID_SEQUETIAL_CHANNELS = ['trivia', 'bot-testing']
MAX_SEQUENTIAL_QUESTIONS = 30
BOOLEAN_REACTS = ['this', 'not-this']
MULTIPLE_CHOICE_REACTS = ['green_heart', 'yellow_heart', 'heart', 'blue_heart']
CHOICE_COLORS = ['#6C9935', '#F3C200', '#B6281E', '#3176EF']
CRON_CHANNEL = 'trivia'
CRON_SECONDS = 86385
CRON_ARGUMENTS = ''
@bot.on_command('trivia')
@loading_status
def handle_trivia(command: Command):
args = parse_arguments(command.channel_id, command.arg if command.has_arg() else '')
if args.help:
return
if args.cats:
bot.post_message(command.channel_id, get_categories())
return
current_channel = bot.channels.get(command.channel_id)
if all([args.count > 1, not current_channel.is_im,
current_channel.name not in VALID_SEQUETIAL_CHANNELS]):
if len(VALID_SEQUETIAL_CHANNELS) == 0:
bot.post_message(command.channel_id,
'This command can only be used in private messages with the bot')
return
first_valid = bot.channels.get(VALID_SEQUETIAL_CHANNELS[0])
channel_message = ''
if first_valid:
channel_message = f'Try <#{first_valid.id}|{VALID_SEQUETIAL_CHANNELS[0]}>.'
bot.post_message(command.channel_id, f'You cannot use the sequential questions '
+ f'feature in this channel. {channel_message}')
return
handle_question(command.channel_id, args)
def parse_arguments(channel: Channel, arg_string: str) -> argparse.Namespace:
parser = argparse.ArgumentParser(prog='!trivia', add_help=False)
def usage_error(*args, **kwargs):
raise UsageSyntaxException()
parser.error = usage_error
parser.add_argument('-d', '--difficulty', choices=['easy', 'medium', 'hard'],
default='random', type=str.lower,
help='The difficulty of the question. (default: %(default)s)')
parser.add_argument('-c', '--category', default=-1, type=int,
help='Specifies a category (default: any)')
parser.add_argument('-t', '--type', choices=['boolean', 'multiple'],
default="random", type=str.lower,
help='The type of question. (default: %(default)s)')
parser.add_argument('-s', '--seconds', default=30, type=int,
help='Number of seconds before posting answer (default: %(default)s)')
parser.add_argument('-n', '--count', default=1, type=int, help=f"Do 'n' trivia questions in "
f"quick succession (max : {MAX_SEQUENTIAL_QUESTIONS})")
parser.add_argument('--cats', action='store_true',
help='Sends a list of valid categories to the user')
parser.add_argument('-h', '--help', action='store_true', help='Prints this help message')
args = parser.parse_args(arg_string.split())
if args.help:
bot.post_message(channel, parser.format_help())
args.seconds = max(MIN_SECONDS, args.seconds)
args.seconds = min(args.seconds, MAX_SECONDS)
args.count = max(args.count, 1)
args.count = min(args.count, MAX_SEQUENTIAL_QUESTIONS)
args.original_count = args.count
return args
def get_categories() -> str:
http_response = requests.get(CATEGORIES_URL)
if http_response.status_code != requests.codes.ok:
return "There was a problem getting the response"
categories = json.loads(http_response.content)['trivia_categories']
pretty_results = '```Use the id to specify a specific category \n\nID Name\n'
for category in categories:
pretty_results += f'{category["id"]:<4d}{category["name"]}\n'
pretty_results += '```'
return pretty_results
def handle_question(channel: Channel, args: argparse.Namespace):
question_data = get_question_data(channel, args)
if question_data is None:
return
question_number = args.original_count - args.count + 1
prefix = f'Q{question_number}:' if args.original_count > 1 else ''
post_question(channel, question_data, prefix)
if question_data.is_boolean:
if question_data.correct_answer == 'True':
answer_text = f':{BOOLEAN_REACTS[0]}:'
else:
answer_text = f':{BOOLEAN_REACTS[1]}:'
else:
answer_text = question_data.correct_answer
answer_message = f'The answer to the question *{question_data.question}* is: *{answer_text}*'
post_answer = partial(bot.post_message, channel, answer_message)
schedule_action(post_answer, args.seconds)
if args.count > 1:
args.count -= 1
schedule_action(partial(handle_question, channel, args), args.seconds + 5)
def get_question_data(channel: Channel, args: argparse.Namespace) -> Optional[QuestionData]:
params: Dict[str, Union[int, str]] = {'amount': 1, 'encode': 'base64'}
if args.category != -1:
params['category'] = args.category
if args.difficulty != 'random':
params['difficulty'] = args.difficulty
if args.type != 'random':
params['type'] = args.type
http_response = requests.get(API_URL, params=params)
if http_response.status_code != requests.codes.ok:
bot.post_message(channel, "There was a problem getting the response")
return None
response_content = json.loads(http_response.content)
if response_content['response_code'] == 2:
bot.post_message(channel, "Invalid category id. "
+ "Try !trivia --cats for a list of valid categories.")
return None
elif response_content['response_code'] != 0:
bot.post_message(channel, "No results were returned")
return None
question_data = response_content['results'][0]
is_boolean = len(question_data['incorrect_answers']) == 1
answers = [question_data['correct_answer']] + question_data['incorrect_answers']
del question_data['category']
del question_data['difficulty']
del question_data['incorrect_answers']
# Decode the ones we want. The base 64 decoding ensures
# that the formatting works properly with slack.
question_data['question'] = decode_b64(question_data['question'])
question_data['correct_answer'] = decode_b64(question_data['correct_answer'])
answers = [decode_b64(ans) for ans in answers]
question_data = QuestionData(is_boolean=is_boolean, answers=answers, **question_data)
# Shuffle the answers
random.shuffle(question_data.answers)
return question_data
def post_question(channel: Channel, question_data: QuestionData, prefix: str = '') -> float:
# Post the question and get the timestamp for the reactions (asterisks bold it)
message_ts = bot.post_message(channel, f'*{prefix} {question_data.question}*')['ts']
# Print the questions (if multiple choice) and add the answer reactions
reactions = BOOLEAN_REACTS if question_data.is_boolean else MULTIPLE_CHOICE_REACTS
if not question_data.is_boolean:
message_ts = post_possible_answers(channel, question_data.answers)
add_reactions_interval(reactions, channel, message_ts, REACT_INTERVAL)
return message_ts
def add_reactions_interval(reactions: List[str], channel: Channel,
msg_timestamp: str, interval: float = 1):
# If the react interval is 0 don't do any of the scheduling stuff
if REACT_INTERVAL == 0:
for reaction in reactions:
bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
return
bot.api.reactions.add(name=reactions[0], channel=channel, timestamp=msg_timestamp)
def add_reaction(reaction: str):
bot.api.reactions.add(name=reaction, channel=channel, timestamp=msg_timestamp)
for index, reaction in enumerate(reactions[1:]):
delay = (index + 1) * interval
schedule_action(partial(add_reaction, reaction), delay)
def decode_b64(encoded: str) -> str:
return base64.b64decode(encoded).decode('utf-8')
def get_correct_reaction(question_data: QuestionData):
if question_data.is_boolean:
if question_data.correct_answer == 'True':
correct_reaction = BOOLEAN_REACTS[0]
else:
correct_reaction = BOOLEAN_REACTS[1]
else:
correct_reaction = MULTIPLE_CHOICE_REACTS[
question_data.answers.index(question_data.correct_answer)]
return correct_reaction
def post_possible_answers(channel: Channel, answers: List[str]) -> float:
attachments = []
for col, answer in zip(CHOICE_COLORS, answers):
ans_att = {'text': answer, 'color': col}
attachments.append(ans_att)
return bot.post_message(channel, '', attachments=attachments)['ts']
def schedule_action(action: Callable, secs: Union[int, float]):
run_date = datetime.now(timezone(timedelta(hours=10))) + timedelta(seconds=secs)
bot._scheduler.add_job(action, 'date', run_date=run_date)
@bot.on_schedule('cron', hour=12, timezone='Australia/Brisbane')
def daily_trivia():
channel = bot.channels.get(CRON_CHANNEL).id
args = parse_arguments(channel, CRON_ARGUMENTS)
args.seconds = CRON_SECONDS
handle_question(channel, args)
hours = CRON_SECONDS // 3600
minutes = (CRON_SECONDS - (hours * 3600)) // 60
if minutes > 55:
hours += 1
minutes = 0
time_until_answer = 'Answer in '
if hours > 0:
time_until_answer += f'{hours} hours'
if minutes > 0:
time_until_answer += f' and {minutes} minutes' if hours > 0 else f'{minutes} minutes'
bot.post_message(channel, time_until_answer)
| true | true |
f7fda4f14479365f169cc9e649d6252b9f3f754e | 2,481 | py | Python | losses.py | stevezhangz/ournn | a5d8383971e9a921c38380507f1abbd93a88ca88 | [
"Apache-2.0"
] | 2 | 2021-04-05T02:28:59.000Z | 2021-11-20T01:24:17.000Z | ournn/losses.py | stevezhangz/ournn | a5d8383971e9a921c38380507f1abbd93a88ca88 | [
"Apache-2.0"
] | null | null | null | ournn/losses.py | stevezhangz/ournn | a5d8383971e9a921c38380507f1abbd93a88ca88 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from ournn.tools.matrix_tools import dot_mul2d
import math
class MSE:
def __init__(self):
pass
def loss(self,x,y,delta=1e-3):
if isinstance(x,int) or isinstance(x,float):
if isinstance(y, int) or isinstance(y, float):
return (x-y)*(x-y)
assert x.shape==y.shape
add_=0
self.err = np.square((x - y))
return self.err
def __call__(self,x,y):
self.x=x
self.y=y
return self.loss(x,y)
def backward(self):
return 2*(self.x-self.y)
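# Worked example of MSE (comments only; 2-D float arrays assumed):
# x = [[1.0, 2.0]], y = [[0.5, 2.0]]
# MSE()(x, y) -> [[0.25, 0.0]] since the loss is elementwise (x - y)**2,
# and .backward() -> [[1.0, 0.0]] since d/dx (x - y)**2 = 2*(x - y)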
"""
Cross entropy
"""
class sparse_logit_cross_entropy:
def __init__(self):
pass
def loss(self,x,y):
if isinstance(x,int) or isinstance(x,float):
if isinstance(y, int) or isinstance(y, float):
return -y*np.log(x)
x=x.reshape(y.shape)
assert x.shape==y.shape
out=-np.log(x)*y
return out
def __call__(self, x,y):
self.x=x
self.y=y
return self.loss(x,y)
def backward(self):
if isinstance(self.x,int) or isinstance(self.x,float):
if isinstance(self.y, int) or isinstance(self.y, float):
return -self.y/(self.x)
self.x=self.x.reshape(self.y.shape)
cross_entropy=[]
assert self.x.shape==self.y.shape
out=-(1/(self.x))*self.y
return out
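# Worked example (comments only): with predictions x = [0.5, 0.5] and a
# one-hot target y = [1, 0], loss() gives [-log(0.5), 0] ~= [0.693, 0.0]
# and backward() gives -y/x = [-2.0, -0.0].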
"""
The predicted values are processed by softmax and then the cross entropy is calculated.
In other words, the last layer of the model does not have to apply a softmax activation itself.
"""
class sparse_softmax_cross_entropy:
def __init__(self):
pass
def loss(self,x,y,logit=sparse_logit_cross_entropy(),down_delta=1e-3,upsume=1e5):
self.x=x
self.y=y
if isinstance(x,int) or isinstance(x,float):
raise FileExistsError
assert x.shape==y.shape
out=[]
x+=1e-5
for i in range(x.shape[0]):
line_sotmax=[]
line_sotmax.append((x[i,:]/(np.sum(x[i,:]))))
out.append(line_sotmax)
out=np.squeeze(np.array(out))
cross_entropy_out=logit(out,y)
self.logit=logit
self.softout=out
return cross_entropy_out
def __call__(self,x,y):
return self.loss(x,y)
def backward(self):
logit_back=self.logit.backward()
exp_x_n=1/(np.exp(-(self.x))+1e-5)
bac=self.softout*(-1+self.softout/exp_x_n)*logit_back
return bac
| 28.193182 | 85 | 0.584442 |
import numpy as np
from ournn.tools.matrix_tools import dot_mul2d
import math
class MSE:
def __init__(self):
pass
def loss(self,x,y,delta=1e-3):
if isinstance(x,int) or isinstance(x,float):
if isinstance(y, int) or isinstance(y, float):
return (x-y)*(x-y)
assert x.shape==y.shape
add_=0
self.err = np.square((x - y))
return self.err
def __call__(self,x,y):
self.x=x
self.y=y
return self.loss(x,y)
def backward(self):
return 2*(self.x-self.y)
class sparse_logit_cross_entropy:
def __init__(self):
pass
def loss(self,x,y):
if isinstance(x,int) or isinstance(x,float):
if isinstance(y, int) or isinstance(y, float):
return -y*np.log(x)
x=x.reshape(y.shape)
assert x.shape==y.shape
out=-np.log(x)*y
return out
def __call__(self, x,y):
self.x=x
self.y=y
return self.loss(x,y)
def backward(self):
if isinstance(self.x,int) or isinstance(self.x,float):
if isinstance(self.y, int) or isinstance(self.y, float):
return -self.y/(self.x)
self.x=self.x.reshape(self.y.shape)
cross_entropy=[]
assert self.x.shape==self.y.shape
out=-(1/(self.x))*self.y
return out
class sparse_softmax_cross_entropy:
def __init__(self):
pass
def loss(self,x,y,logit=sparse_logit_cross_entropy(),down_delta=1e-3,upsume=1e5):
self.x=x
self.y=y
if isinstance(x,int) or isinstance(x,float):
raise FileExistsError
assert x.shape==y.shape
out=[]
x+=1e-5
for i in range(x.shape[0]):
line_sotmax=[]
line_sotmax.append((x[i,:]/(np.sum(x[i,:]))))
out.append(line_sotmax)
out=np.squeeze(np.array(out))
cross_entropy_out=logit(out,y)
self.logit=logit
self.softout=out
return cross_entropy_out
def __call__(self,x,y):
return self.loss(x,y)
def backward(self):
logit_back=self.logit.backward()
exp_x_n=1/(np.exp(-(self.x))+1e-5)
bac=self.softout*(-1+self.softout/exp_x_n)*logit_back
return bac
| true | true |
f7fda6cdb459a8f9f20428e30d2c2938e76bca2f | 5,097 | py | Python | BaseTools/Plugin/CompilerPlugin/CompilerPlugin.py | changeworld/mu_basecore | 1a883ec85d8d2f49663c76e1a1bc5068333f5508 | [
"BSD-2-Clause"
] | null | null | null | BaseTools/Plugin/CompilerPlugin/CompilerPlugin.py | changeworld/mu_basecore | 1a883ec85d8d2f49663c76e1a1bc5068333f5508 | [
"BSD-2-Clause"
] | null | null | null | BaseTools/Plugin/CompilerPlugin/CompilerPlugin.py | changeworld/mu_basecore | 1a883ec85d8d2f49663c76e1a1bc5068333f5508 | [
"BSD-2-Clause"
] | 1 | 2021-11-01T19:33:11.000Z | 2021-11-01T19:33:11.000Z | # @file Compiler_plugin.py
# Simple Project Mu Build Plugin to support
# compiling code
##
# Copyright (c) 2018, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
###
import logging
from MuEnvironment.PluginManager import IMuBuildPlugin
from MuEnvironment.UefiBuild import UefiBuilder
import os
import re
class CompilerPlugin(IMuBuildPlugin):
# gets the tests name
def GetTestName(self, packagename, environment):
target = environment.GetValue("TARGET")
return ("MuBuild Compile " + target + " " + packagename, "MuBuild.CompileCheck." + target + "." + packagename)
def IsTargetDependent(self):
return True
# External function of plugin. This function is used to perform the task of the MuBuild Plugin
# - package is the edk2 path to package. This means workspace/packagepath relative.
# - edk2path object configured with workspace and packages path
# - any additional command line args
# - RepoConfig Object (dict) for the build
# - PkgConfig Object (dict)
# - EnvConfig Object
# - Plugin Manager Instance
# - Plugin Helper Obj Instance
# - testcase Object used for outputing junit results
# - output_stream the StringIO output stream from this plugin
def RunBuildPlugin(self, packagename, Edk2pathObj, args, repoconfig, pkgconfig, environment, PLM, PLMHelper, tc, output_stream = None):
self._env = environment
AP = Edk2pathObj.GetAbsolutePathOnThisSytemFromEdk2RelativePath(packagename)
APDSC = self.get_dsc_name_in_dir(AP)
AP_Path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(APDSC)
logging.info("Building {0}".format(AP_Path))
if AP is None or AP_Path is None or not os.path.isfile(APDSC):
tc.SetSkipped()
tc.LogStdError("1 warning(s) in {0} Compile. DSC not found.".format(packagename))
return 0
self._env.SetValue("ACTIVE_PLATFORM", AP_Path, "Set in Compiler Plugin")
# WorkSpace, PackagesPath, PInManager, PInHelper, args, BuildConfigFile=None):
uefiBuilder = UefiBuilder(Edk2pathObj.WorkspacePath, os.pathsep.join(Edk2pathObj.PackagePathList), PLM, PLMHelper, args)
# do all the steps
ret = uefiBuilder.Go()
if ret != 0: # failure:
if output_stream is not None:
try:
# seek to the start of the output stream
output_stream.seek(0, 0)
error_exp = re.compile(r"error C(\d+):")
linker_error_exp = re.compile(r"error LNK(\d+):")
warning_exp = re.compile(r"warning C(\d+):")
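# Hypothetical MSVC-style lines for illustration of the patterns above:
# "foo.c(12): error C2065: 'bar': undeclared identifier" matches error_exp,
# "bar.obj : error LNK2019: unresolved external symbol" matches linker_error_exp.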
for line in output_stream.readlines():
match = error_exp.search(line)
if match is not None:
tc.LogStdError("Compile: Error: {0}".format(line))
match = warning_exp.search(line)
if match is not None:
tc.LogStdOut("Compile: Warning: {0}".format(line))
match = linker_error_exp.search(line)
if match is not None:
tc.LogStdError("Linker: Error: {0}".format(line))
# we might fail if uefiBuilder doesn't have the output stream (if we have an older mu_environment for whatever reason)
except AttributeError:
pass # if we do fail we can ignore it since it just means we can't put more explicit output into the xml
tc.SetFailed("Compile failed for {0}".format(packagename), "Compile_FAILED")
tc.LogStdError("{0} Compile failed with error code {1}".format(AP_Path, ret))
return 1
else:
tc.SetSuccess()
return 0
| 49.485437 | 139 | 0.664312 |
import logging
from MuEnvironment.PluginManager import IMuBuildPlugin
from MuEnvironment.UefiBuild import UefiBuilder
import os
import re
class CompilerPlugin(IMuBuildPlugin):
def GetTestName(self, packagename, environment):
target = environment.GetValue("TARGET")
return ("MuBuild Compile " + target + " " + packagename, "MuBuild.CompileCheck." + target + "." + packagename)
def IsTargetDependent(self):
return True
def RunBuildPlugin(self, packagename, Edk2pathObj, args, repoconfig, pkgconfig, environment, PLM, PLMHelper, tc, output_stream = None):
self._env = environment
AP = Edk2pathObj.GetAbsolutePathOnThisSytemFromEdk2RelativePath(packagename)
APDSC = self.get_dsc_name_in_dir(AP)
AP_Path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(APDSC)
logging.info("Building {0}".format(AP_Path))
if AP is None or AP_Path is None or not os.path.isfile(APDSC):
tc.SetSkipped()
tc.LogStdError("1 warning(s) in {0} Compile. DSC not found.".format(packagename))
return 0
self._env.SetValue("ACTIVE_PLATFORM", AP_Path, "Set in Compiler Plugin")
uefiBuilder = UefiBuilder(Edk2pathObj.WorkspacePath, os.pathsep.join(Edk2pathObj.PackagePathList), PLM, PLMHelper, args)
ret = uefiBuilder.Go()
if ret != 0:
if output_stream is not None:
try:
output_stream.seek(0, 0)
error_exp = re.compile(r"error C(\d+):")
linker_error_exp = re.compile(r"error LNK(\d+):")
warning_exp = re.compile(r"warning C(\d+):")
for line in output_stream.readlines():
match = error_exp.search(line)
if match is not None:
tc.LogStdError("Compile: Error: {0}".format(line))
match = warning_exp.search(line)
if match is not None:
tc.LogStdOut("Compile: Warning: {0}".format(line))
match = linker_error_exp.search(line)
if match is not None:
tc.LogStdError("Linker: Error: {0}".format(line))
except AttributeError:
pass # if we do fail we can ignore it since it just means we can't put more explicit output into the xml
tc.SetFailed("Compile failed for {0}".format(packagename), "Compile_FAILED")
tc.LogStdError("{0} Compile failed with error code {1}".format(AP_Path, ret))
return 1
else:
tc.SetSuccess()
return 0
| true | true |
f7fda70d6ddc238f16a588ac5b140c6453a27f89 | 3,997 | py | Python | alipay/aop/api/request/AlipayUserJobcardJobsupplierQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayUserJobcardJobsupplierQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayUserJobcardJobsupplierQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayUserJobcardJobsupplierQueryModel import AlipayUserJobcardJobsupplierQueryModel
class AlipayUserJobcardJobsupplierQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayUserJobcardJobsupplierQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayUserJobcardJobsupplierQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.user.jobcard.jobsupplier.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.565517 | 148 | 0.646985 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayUserJobcardJobsupplierQueryModel import AlipayUserJobcardJobsupplierQueryModel
class AlipayUserJobcardJobsupplierQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayUserJobcardJobsupplierQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayUserJobcardJobsupplierQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.user.jobcard.jobsupplier.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true | true |
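A hedged usage sketch for the generated request class in the row above. The biz-content field name and token are placeholders, not values from the Alipay API documentation.

from alipay.aop.api.request.AlipayUserJobcardJobsupplierQueryRequest import (
    AlipayUserJobcardJobsupplierQueryRequest,
)

request = AlipayUserJobcardJobsupplierQueryRequest()
# Assigning a plain dict goes through from_alipay_dict() in the setter above.
request.biz_content = {"supplier_id": "2088000000000000"}  # placeholder field name
request.add_other_text_param("app_auth_token", "example-token")  # placeholder value
params = request.get_params()
# params always carries the fixed method name set in get_params():
# 'alipay.user.jobcard.jobsupplier.query'
print(params)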
f7fda7d77a841a81cf90cfa8d6ab27e956a1b1e9 | 5,156 | py | Python | app/eMenu/settings.py | asawicki96/eMenu | 39850ea9a25932462fdb12c7f6b1341ad94266bc | ["MIT"] | null | null | null | app/eMenu/settings.py | asawicki96/eMenu | 39850ea9a25932462fdb12c7f6b1341ad94266bc | ["MIT"] | null | null | null | app/eMenu/settings.py | asawicki96/eMenu | 39850ea9a25932462fdb12c7f6b1341ad94266bc | ["MIT"] | null | null | null |
"""
Django settings for eMenu project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from datetime import timedelta
import os
import sys
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#wl5z(!)p0nau_d=sp_7)58870%7l=yp(_mu4w*ceswoq_i(=t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third-Party Apps
'drf_yasg',
'rest_framework',
'rest_framework.authtoken',
'django_filters',
# Local Apps
'core',
'cards',
'dishes'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eMenu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eMenu.wsgi.application'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_MAIL = 'emenu@emenu.pl'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('POSTGRES_DB'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': os.getenv('POSTGRES_PORT'),
}
}
if 'test' in sys.argv:
DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Authentication
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication'
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly'
],
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend',
],
'DEFAULT_PAGINATION_CLASS': 'core.pagination.StandardResultsSetPagination',
}
# Celery Configuration Options
CELERY_TIMEZONE = "Europe/Warsaw"
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', default='redis://redis:6379')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', default='redis://redis:6379')
CELERY_ACCEPT_CONTENT = os.getenv('CELERY_ACCEPT_CONTENT', default=['application/json'])
CELERY_TASK_SERIALIZER = os.getenv('CELERY_TASK_SERIALIZER', default='json')
CELERY_RESULT_SERIALIZER = os.getenv('CELERY_RESULT_SERIALIZER', default='json')
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/backend/media'
STATIC_ROOT = '/vol/backend/static'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
RECENT_DISH_TIMEDELTA = timedelta(days=1)
| 27.87027 | 91 | 0.712762 |
from pathlib import Path
from datetime import timedelta
import os
import sys
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-#wl5z(!)p0nau_d=sp_7)58870%7l=yp(_mu4w*ceswoq_i(=t'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third-Party Apps
'drf_yasg',
'rest_framework',
'rest_framework.authtoken',
'django_filters',
# Local Apps
'core',
'cards',
'dishes'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eMenu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eMenu.wsgi.application'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_MAIL = 'emenu@emenu.pl'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('POSTGRES_DB'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': os.getenv('POSTGRES_PORT'),
}
}
if 'test' in sys.argv:
DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Authentication
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication'
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly'
],
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend',
],
'DEFAULT_PAGINATION_CLASS': 'core.pagination.StandardResultsSetPagination',
}
CELERY_TIMEZONE = "Europe/Warsaw"
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', default='redis://redis:6379')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', default='redis://redis:6379')
CELERY_ACCEPT_CONTENT = os.getenv('CELERY_ACCEPT_CONTENT', default=['application/json'])
CELERY_TASK_SERIALIZER = os.getenv('CELERY_TASK_SERIALIZER', default='json')
CELERY_RESULT_SERIALIZER = os.getenv('CELERY_RESULT_SERIALIZER', default='json')
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = False
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/backend/media'
STATIC_ROOT = '/vol/backend/static'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
RECENT_DISH_TIMEDELTA = timedelta(days=1)
| true | true |
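A sketch of feeding the POSTGRES_* variables that the DATABASES block in the settings above reads, for example before running management commands outside docker-compose. All values are placeholders.

import os

os.environ.setdefault("POSTGRES_DB", "emenu")           # placeholder values only
os.environ.setdefault("POSTGRES_USER", "emenu")
os.environ.setdefault("POSTGRES_PASSWORD", "change-me")
os.environ.setdefault("POSTGRES_HOST", "localhost")
os.environ.setdefault("POSTGRES_PORT", "5432")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eMenu.settings")

import django

django.setup()  # the settings module now resolves with the variables above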
f7fda824e4beae2e7ee28f17f89463e0215fa794 | 9,625 | py | Python | venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_setops.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_setops.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | ["MIT"] | null | null | null | venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_setops.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | ["MIT"] | null | null | null |
import numpy as np
import pytest
import pandas as pd
from pandas import Int64Index, TimedeltaIndex, timedelta_range
import pandas._testing as tm
from pandas.tseries.offsets import Hour
class TestTimedeltaIndex:
def test_union(self):
i1 = timedelta_range("1day", periods=5)
i2 = timedelta_range("3day", periods=5)
result = i1.union(i2)
expected = timedelta_range("1day", periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = timedelta_range(start="1 day", periods=10, freq="D")
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_sort_false(self):
tdi = timedelta_range("1day", periods=5)
left = tdi[3:]
right = tdi[:3]
# Check that we are testing the desired code path
assert left._can_fast_union(right)
result = left.union(right)
tm.assert_index_equal(result, tdi)
result = left.union(right, sort=False)
expected = pd.TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
tm.assert_index_equal(result, expected)
def test_union_coverage(self):
idx = TimedeltaIndex(["3d", "1d", "2d"])
ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range("1 day", periods=4, freq="3H")
rng_b = timedelta_range("1 day", periods=4, freq="4H")
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(["1 day 15:19:49.695000"])
right = TimedeltaIndex(
["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_freq_infer(self):
# When taking the union of two TimedeltaIndexes, we infer
# a freq even if the arguments don't have freq. This matches
# DatetimeIndex behavior.
tdi = pd.timedelta_range("1 Day", periods=5)
left = tdi[[0, 1, 3, 4]]
right = tdi[[2, 3, 1]]
assert left.freq is None
assert right.freq is None
result = left.union(right)
tm.assert_index_equal(result, tdi)
assert result.freq == "D"
def test_intersection_bug_1708(self):
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_intersection_equal(self, sort):
# GH 24471 Test intersection outcome given the sort keyword
# for equal indices intersection should return the original index
first = timedelta_range("1 day", periods=4, freq="h")
second = timedelta_range("1 day", periods=4, freq="h")
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
def test_intersection_zero_length(self, period_1, period_2, sort):
# GH 24471 test for non overlap the intersection should be zero length
index_1 = timedelta_range("1 day", periods=period_1, freq="h")
index_2 = timedelta_range("1 day", periods=period_2, freq="h")
expected = timedelta_range("1 day", periods=0, freq="h")
result = index_1.intersection(index_2, sort=sort)
tm.assert_index_equal(result, expected)
def test_zero_length_input_index(self, sort):
# GH 24966 test for 0-len intersections are copied
index_1 = timedelta_range("1 day", periods=0, freq="h")
index_2 = timedelta_range("1 day", periods=3, freq="h")
result = index_1.intersection(index_2, sort=sort)
assert index_1 is not result
assert index_2 is not result
tm.assert_copy(result, index_1)
@pytest.mark.parametrize(
"rng, expected",
# if target has the same name, it is preserved
[
(
timedelta_range("1 day", periods=5, freq="h", name="idx"),
timedelta_range("1 day", periods=4, freq="h", name="idx"),
),
# if target name is different, it will be reset
(
timedelta_range("1 day", periods=5, freq="h", name="other"),
timedelta_range("1 day", periods=4, freq="h", name=None),
),
# if no overlap exists return empty index
(
timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
TimedeltaIndex([], freq="h", name="idx"),
),
],
)
def test_intersection(self, rng, expected, sort):
# GH 4690 (with tz)
base = timedelta_range("1 day", periods=4, freq="h", name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
@pytest.mark.parametrize(
"rng, expected",
# part intersection works
[
(
TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
),
# reordered part intersection
(
TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
TimedeltaIndex(["1 hour", "2 hour"], name=None),
),
# reversed index
(
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
::-1
],
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
),
],
)
def test_intersection_non_monotonic(self, rng, expected, sort):
# 24471 non-monotonic
base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
# if reversed order, frequency is still the same
if all(base == rng[::-1]) and sort is None:
assert isinstance(result.freq, Hour)
else:
assert result.freq is None
class TestTimedeltaIndexDifference:
def test_difference_freq(self, sort):
# GH14323: Difference of TimedeltaIndex should not preserve frequency
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other, sort)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
def test_difference_sort(self, sort):
index = pd.TimedeltaIndex(
["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"]
)
other = timedelta_range("1 days", "4 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["5 days", "0 days"], freq=None)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["1 days", "0 days"], freq=None)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
| 37.745098 | 88 | 0.581922 |
import numpy as np
import pytest
import pandas as pd
from pandas import Int64Index, TimedeltaIndex, timedelta_range
import pandas._testing as tm
from pandas.tseries.offsets import Hour
class TestTimedeltaIndex:
def test_union(self):
i1 = timedelta_range("1day", periods=5)
i2 = timedelta_range("3day", periods=5)
result = i1.union(i2)
expected = timedelta_range("1day", periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = timedelta_range(start="1 day", periods=10, freq="D")
i1.union(i2)
i2.union(i1)
def test_union_sort_false(self):
tdi = timedelta_range("1day", periods=5)
left = tdi[3:]
right = tdi[:3]
# Check that we are testing the desired code path
assert left._can_fast_union(right)
result = left.union(right)
tm.assert_index_equal(result, tdi)
result = left.union(right, sort=False)
expected = pd.TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
tm.assert_index_equal(result, expected)
def test_union_coverage(self):
idx = TimedeltaIndex(["3d", "1d", "2d"])
ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range("1 day", periods=4, freq="3H")
rng_b = timedelta_range("1 day", periods=4, freq="4H")
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(["1 day 15:19:49.695000"])
right = TimedeltaIndex(
["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_freq_infer(self):
# When taking the union of two TimedeltaIndexes, we infer
# a freq even if the arguments don't have freq. This matches DatetimeIndex behavior.
tdi = pd.timedelta_range("1 Day", periods=5)
left = tdi[[0, 1, 3, 4]]
right = tdi[[2, 3, 1]]
assert left.freq is None
assert right.freq is None
result = left.union(right)
tm.assert_index_equal(result, tdi)
assert result.freq == "D"
def test_intersection_bug_1708(self):
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_intersection_equal(self, sort):
first = timedelta_range("1 day", periods=4, freq="h")
second = timedelta_range("1 day", periods=4, freq="h")
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
def test_intersection_zero_length(self, period_1, period_2, sort):
index_1 = timedelta_range("1 day", periods=period_1, freq="h")
index_2 = timedelta_range("1 day", periods=period_2, freq="h")
expected = timedelta_range("1 day", periods=0, freq="h")
result = index_1.intersection(index_2, sort=sort)
tm.assert_index_equal(result, expected)
def test_zero_length_input_index(self, sort):
index_1 = timedelta_range("1 day", periods=0, freq="h")
index_2 = timedelta_range("1 day", periods=3, freq="h")
result = index_1.intersection(index_2, sort=sort)
assert index_1 is not result
assert index_2 is not result
tm.assert_copy(result, index_1)
@pytest.mark.parametrize(
"rng, expected",
[
(
timedelta_range("1 day", periods=5, freq="h", name="idx"),
timedelta_range("1 day", periods=4, freq="h", name="idx"),
),
(
timedelta_range("1 day", periods=5, freq="h", name="other"),
timedelta_range("1 day", periods=4, freq="h", name=None),
),
(
timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
TimedeltaIndex([], freq="h", name="idx"),
),
],
)
def test_intersection(self, rng, expected, sort):
base = timedelta_range("1 day", periods=4, freq="h", name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
@pytest.mark.parametrize(
"rng, expected",
[
(
TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
),
(
TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
TimedeltaIndex(["1 hour", "2 hour"], name=None),
),
(
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
::-1
],
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
),
],
)
def test_intersection_non_monotonic(self, rng, expected, sort):
base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
if all(base == rng[::-1]) and sort is None:
assert isinstance(result.freq, Hour)
else:
assert result.freq is None
class TestTimedeltaIndexDifference:
def test_difference_freq(self, sort):
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other, sort)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
def test_difference_sort(self, sort):
index = pd.TimedeltaIndex(
["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"]
)
other = timedelta_range("1 days", "4 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["5 days", "0 days"], freq=None)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other, sort)
expected = TimedeltaIndex(["1 days", "0 days"], freq=None)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal("freq", idx_diff, expected)
| true | true |
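A standalone illustration of the union/intersection semantics asserted by the test module above; the shapes noted in the comments follow directly from those tests.

import pandas as pd

i1 = pd.timedelta_range("1 day", periods=5)  # 1..5 days
i2 = pd.timedelta_range("3 day", periods=5)  # 3..7 days

print(i1.union(i2))         # contiguous 1..7 days, daily freq preserved
print(i1.intersection(i2))  # the overlap: 3..5 days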
f7fdaa029dd647b0bc142b96342a06b4457c06ce | 94 | py | Python | enthought/mayavi/filters/delaunay2d.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | ["BSD-3-Clause"] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/mayavi/filters/delaunay2d.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | ["BSD-3-Clause"] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/mayavi/filters/delaunay2d.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | ["BSD-3-Clause"] | null | null | null |
# proxy module
from __future__ import absolute_import
from mayavi.filters.delaunay2d import *
| 23.5 | 39 | 0.840426 |
from __future__ import absolute_import
from mayavi.filters.delaunay2d import *
| true | true |
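The two-line file above is a compatibility shim: the deprecated enthought.* import path simply re-exports the relocated mayavi module. A sketch of the same pattern with a stdlib target so it runs anywhere (the module name is illustrative):

# legacy_paths.py -- keeps old imports working after a package rename
from __future__ import absolute_import
from pathlib import *  # re-export everything under the legacy module name

# callers can keep writing `from legacy_paths import Path` unchanged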
f7fdac1ec7a2d0a1dbbf432803eb274f794b791d | 3,102 | py | Python | PythonProjects/RpgHordeGame/RPG_Horde.py | Samuel-Melo890/Python-Projects | 7968b4a7383ec95b92993264f8469240ac92425c | ["MIT"] | null | null | null | PythonProjects/RpgHordeGame/RPG_Horde.py | Samuel-Melo890/Python-Projects | 7968b4a7383ec95b92993264f8469240ac92425c | ["MIT"] | null | null | null | PythonProjects/RpgHordeGame/RPG_Horde.py | Samuel-Melo890/Python-Projects | 7968b4a7383ec95b92993264f8469240ac92425c | ["MIT"] | null | null | null |
'''Create a game with races and some classes of an ordinary fantasy medieval rpg where:
1. You choose a race, a class and start 1v1 battles against some monsters, gaining points
for each monster defeated. Each race and class will have some unique traits and skills.
2. The battle system will use some dices (like a Tabletop RPG) that will influence on the
results of the battle. There will be three main options to choose to act: Attack (with at
least 2 different attacks by class), Defend (where the dice results are used as shields to
reduce the enemy attack), Evade (where if your dice results are greater than the enemy, you
nullify the enemy damage else you take the entire damage, and use some item).
3. When your character dies you will receive your points accumulated for each enemy
defeated.'''
from tkinter import *
from tkinter import ttk
import os
from RPG_module.classes import *
from RPG_module.functions import *
dir = os.path.dirname(__file__)
game = Tk()
game.title('RPG Horde Game')
game.geometry('700x400')
screen_fr = Frame(game, borderwidth=1, relief='flat', background='lightblue')
screen_fr.place(x=0, y=0, width=1280, height=660)
img = PhotoImage(file=dir+r'\Images\rpg_screen.gif')
lb_img = Label(screen_fr, image=img, borderwidth=0)  # keep the widget reference; pack() returns None
lb_img.pack()
start_fr = Frame(lb_img, borderwidth=1, relief='raised', background='black', width=400, height=400)
start_fr.pack(pady=60, ipady=20)
start_title = Label(start_fr, text='RPG Horde Game', font=('Comic Sans MS', 15), foreground='white', background='darkred', width=30)
start_title.grid(columnspan=2, row=0, sticky='n')
Label(start_fr, text='Character Name', bg='darkred', fg='white', font=('Comic Sans MS', 12)).grid(columnspan=2, row=1, pady=20)
c_name = Entry(start_fr, width=23)
c_name.grid(columnspan=2, row=2)
Label(start_fr, text='Race', font=('Comic Sans MS', 12), background='darkred', foreground='white').grid(columnspan=2, row=3, pady=20)
races = ['Human', 'Elf', 'Dwarf', 'Half Orc']
start_race = ttk.Combobox(start_fr, values=races)
start_race.set(races[0])
start_race.grid(columnspan=2, row=4)
Label(start_fr, text='Class', font=('Comic Sans MS', 12), background='darkred', foreground='white').grid(columnspan=2, row=5, pady=20)
classes = ['Knight', 'Mage', 'Archer', 'Swordsman', 'Barbarian', 'Rogue']
start_class = ttk.Combobox(start_fr, values=classes)
start_class.set(classes[0])
start_class.grid(columnspan=2, row=6)
Label(start_fr, text='Battle Scene', font=('Comic Sans MS', 12), bg='darkred', fg='white').grid(columnspan=2, row=7, pady=20)
scenes = ['Scene 1', 'Scene 2', 'Scene 3']
scene = ttk.Combobox(start_fr, values=scenes)
scene.set(value=scenes[0])
scene.grid(columnspan=2, row=8)
btn_play = Button(start_fr, text='Play Game', font=('Comic Sans MS', 12), background='darkred', foreground='white', command=play)
btn_play.grid(column=0, row=9, pady=25, sticky='e', padx=15)
btn_exit = Button(start_fr, text='Exit Game', font=('Comic Sans MS', 12), background='darkred', foreground='white', command=game.quit)
btn_exit.grid(column=1, row=9, pady=25, sticky='w', padx=15)
game.mainloop()
| 42.493151 | 134 | 0.734043 |
from tkinter import *
from tkinter import ttk
import os
from RPG_module.classes import *
from RPG_module.functions import *
dir = os.path.dirname(__file__)
game = Tk()
game.title('RPG Horde Game')
game.geometry('700x400')
screen_fr = Frame(game, borderwidth=1, relief='flat', background='lightblue')
screen_fr.place(x=0, y=0, width=1280, height=660)
img = PhotoImage(file=dir+r'\Images\rpg_screen.gif')
lb_img = Label(screen_fr, image=img, borderwidth=0)
lb_img.pack()
start_fr = Frame(lb_img, borderwidth=1, relief='raised', background='black', width=400, height=400)
start_fr.pack(pady=60, ipady=20)
start_title = Label(start_fr, text='RPG Horde Game', font=('Comic Sans MS', 15), foreground='white', background='darkred', width=30)
start_title.grid(columnspan=2, row=0, sticky='n')
Label(start_fr, text='Character Name', bg='darkred', fg='white', font=('Comic Sans MS', 12)).grid(columnspan=2, row=1, pady=20)
c_name = Entry(start_fr, width=23)
c_name.grid(columnspan=2, row=2)
Label(start_fr, text='Race', font=('Comic Sans MS', 12), background='darkred', foreground='white').grid(columnspan=2, row=3, pady=20)
races = ['Human', 'Elf', 'Dwarf', 'Half Orc']
start_race = ttk.Combobox(start_fr, values=races)
start_race.set(races[0])
start_race.grid(columnspan=2, row=4)
Label(start_fr, text='Class', font=('Comic Sans MS', 12), background='darkred', foreground='white').grid(columnspan=2, row=5, pady=20)
classes = ['Knight', 'Mage', 'Archer', 'Swordsman', 'Barbarian', 'Rogue']
start_class = ttk.Combobox(start_fr, values=classes)
start_class.set(classes[0])
start_class.grid(columnspan=2, row=6)
Label(start_fr, text='Battle Scene', font=('Comic Sans MS', 12), bg='darkred', fg='white').grid(columnspan=2, row=7, pady=20)
scenes = ['Scene 1', 'Scene 2', 'Scene 3']
scene = ttk.Combobox(start_fr, values=scenes)
scene.set(value=scenes[0])
scene.grid(columnspan=2, row=8)
btn_play = Button(start_fr, text='Play Game', font=('Comic Sans MS', 12), background='darkred', foreground='white', command=play)
btn_play.grid(column=0, row=9, pady=25, sticky='e', padx=15)
btn_exit = Button(start_fr, text='Exit Game', font=('Comic Sans MS', 12), background='darkred', foreground='white', command=game.quit)
btn_exit.grid(column=1, row=9, pady=25, sticky='w', padx=15)
game.mainloop()
| true | true |
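A hedged sketch of the dice resolution described in the module docstring above (Defend uses the roll as a shield, Evade negates damage only on a higher roll). Dice sizes and damage values are assumptions, not numbers from the game's RPG_module package.

import random


def roll(dice=2, sides=6):
    return sum(random.randint(1, sides) for _ in range(dice))


def incoming_damage(action, player_roll, enemy_roll, enemy_damage):
    if action == "defend":
        # dice results act as a shield that reduces the enemy attack
        return max(0, enemy_damage - player_roll)
    if action == "evade":
        # a higher roll nullifies the damage, otherwise take it in full
        return 0 if player_roll > enemy_roll else enemy_damage
    return enemy_damage  # plain attack: trade blows, no mitigation


print(incoming_damage("defend", roll(), roll(), enemy_damage=7))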
f7fdac53a697b91f50273d8dd7c43ed55557a0dd | 3,772 | py | Python | contrib/macdeploy/custom_dsstore.py | mxdum/Mxdum | 813de58604a5dc0936e61c440af8b768cb35f055 | ["MIT"] | null | null | null | contrib/macdeploy/custom_dsstore.py | mxdum/Mxdum | 813de58604a5dc0936e61c440af8b768cb35f055 | ["MIT"] | 1 | 2019-02-23T21:39:25.000Z | 2019-02-23T21:39:25.000Z | contrib/macdeploy/custom_dsstore.py | mxdum/Mxdum | 813de58604a5dc0936e61c440af8b768cb35f055 | ["MIT"] | 1 | 2019-02-23T21:34:31.000Z | 2019-02-23T21:34:31.000Z |
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Mxdum-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.836066 | 1,817 | 0.7272 |
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Mxdum-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true | true |
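A hedged invocation sketch for the script above, which the macdeploy packaging step normally calls with an output path and a volume name; both arguments below are placeholders.

import subprocess

subprocess.run(
    [
        "python",
        "contrib/macdeploy/custom_dsstore.py",
        "dist/.DS_Store",  # output_file (placeholder path)
        "Mxdum-Core",      # package_name_ns / volume name (placeholder)
    ],
    check=True,
)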
f7fdac9cb82b14c35e8dcbf67fb1523ee2ce8cca | 316 | py | Python | Step_8/Parameter.py | LeeDaeil/Process_A3C | 1876fbe1b928e13b9c8766095b2d13abfda94019 | ["Apache-2.0"] | 1 | 2020-03-22T14:39:44.000Z | 2020-03-22T14:39:44.000Z | Step_8/Parameter.py | LeeDaeil/Process_A3C | 1876fbe1b928e13b9c8766095b2d13abfda94019 | ["Apache-2.0"] | 1 | 2018-12-23T07:35:43.000Z | 2018-12-23T07:35:43.000Z | Step_8/Parameter.py | LeeDaeil/Process_A3C | 1876fbe1b928e13b9c8766095b2d13abfda94019 | ["Apache-2.0"] | 2 | 2018-10-10T01:56:04.000Z | 2018-10-11T03:57:39.000Z |
class PARA:
CNS_ip = '192.168.0.55'
CNS_port = 7001
CNS_test_ip = '192.168.0.60'
CNS_test_port = 7001
Remote_ip = '192.168.0.29'
Remote_port = 7001
# select model
# Model = 'LSTM'
Model = 'DNN'
show_model = False
show_input_windows = True
save_input_log = False
| 15.8 | 32 | 0.601266 |
class PARA:
CNS_ip = '192.168.0.55'
CNS_port = 7001
CNS_test_ip = '192.168.0.60'
CNS_test_port = 7001
Remote_ip = '192.168.0.29'
Remote_port = 7001
Model = 'DNN'
show_model = False
show_input_windows = True
save_input_log = False
| true | true |
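A sketch of consuming the PARA constants above. The UDP socket wiring and the import path are assumptions inferred from the attribute names, not code from the repository.

import socket

from Step_8.Parameter import PARA  # import path assumed from the repo layout

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((PARA.Remote_ip, PARA.Remote_port))  # receive frames sent by the CNS
print(f"model={PARA.Model}, listening on {PARA.Remote_ip}:{PARA.Remote_port}")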
f7fdacbe2c01fbe3fa5d31f5b4df0d5a906394da | 8,660 | py | Python | controllerlib/action.py | magnetikonline/docker-unifi-network-controller | d3f8e31af47b1db488448a60876f35862fd99918 | ["MIT"] | 3 | 2019-11-16T15:33:13.000Z | 2020-05-28T02:01:13.000Z | controllerlib/action.py | magnetikonline/docker-unifi-network-controller | d3f8e31af47b1db488448a60876f35862fd99918 | ["MIT"] | null | null | null | controllerlib/action.py | magnetikonline/docker-unifi-network-controller | d3f8e31af47b1db488448a60876f35862fd99918 | ["MIT"] | null | null | null |
import os
import tarfile
from typing import AbstractSet
from controllerlib import docker
CONTROLLER_REPOSITORY_NAME = "magnetikonline/unifi-network-controller"
CONTROLLER_PORT_COMMS = 8080
CONTROLLER_PORT_GUI = 8443
CONTROLLER_BASE_DIR = "/usr/lib/unifi"
BACKUP_REPOSITORY_NAME = "alpine"
BACKUP_RESTORE_BACKUP_PATH = "/backup"
BACKUP_RESTORE_VOLUME_MOUNT_PATH = "/data"
BACKUP_ARCHIVE_KEY_FILE_LIST = [
"./db/version",
"./db/WiredTiger",
"./firmware.json",
"./system.properties",
]
def start_server(image_tag: str, server_prefix: str, no_host_network: bool) -> None:
# confirm controller Docker image exists, if not pull it
_image_pull(CONTROLLER_REPOSITORY_NAME, image_tag)
# query containers, confirm server container does not exist, otherwise exit
container_name = _container_server_name(server_prefix)
if container_name in dict(docker.container_list()):
raise FatalError(f"container [{container_name}] already exists")
# query Docker volumes, create any that are required
volume_name_data = _volume_data_name(server_prefix)
volume_name_logs = _volume_logs_name(server_prefix)
volume_list = dict(docker.volume_list()).keys()
_volume_create(volume_name_data, volume_list)
_volume_create(volume_name_logs, volume_list)
# start server
print(
f"Starting server [{CONTROLLER_REPOSITORY_NAME}:{image_tag}] as [{container_name}]"
)
try:
container_id = docker.container_run(
image_repository=CONTROLLER_REPOSITORY_NAME,
image_tag=image_tag,
detach=True,
name=container_name,
network_host=not no_host_network,
publish_list=[
(CONTROLLER_PORT_COMMS, CONTROLLER_PORT_COMMS),
(CONTROLLER_PORT_GUI, CONTROLLER_PORT_GUI),
],
remove_on_exit=True,
volume_list=[
(volume_name_data, f"{CONTROLLER_BASE_DIR}/data"),
(volume_name_logs, f"{CONTROLLER_BASE_DIR}/logs"),
],
)
except docker.DockerError:
raise FatalError("unable to start server")
print(f"Running as container ID [{container_id}]")
def stop_server(server_prefix: str) -> None:
container_name = _container_server_name(server_prefix)
container_list = dict(docker.container_list())
# confirm server container exists and is running
if container_name not in container_list:
raise FatalError(f"container [{container_name}] does not exist")
if not container_list[container_name]["running"]:
raise FatalError(f"container [{container_name}] not running")
print(f"Stopping server [{container_name}]")
try:
docker.container_stop(container_name)
except docker.DockerError:
raise FatalError("unable to stop server")
print("Server has stopped")
def backup(server_prefix: str, archive_dir: str, archive_name: str) -> None:
# confirm data volume for backup exists
volume_name_data = _volume_data_name(server_prefix)
if volume_name_data not in dict(docker.volume_list()):
raise FatalError(f"data volume [{volume_name_data}] does not exist for backup")
# confirm image used for backup exists, if not pull it
_image_pull(BACKUP_REPOSITORY_NAME)
try:
# execute backup of volume - using temporary container
docker.container_run(
image_repository=BACKUP_REPOSITORY_NAME,
bind_list=[(archive_dir, BACKUP_RESTORE_BACKUP_PATH)],
command_arg_list=[
"/bin/sh",
"-c",
_backup_archive_cmd(f"{BACKUP_RESTORE_BACKUP_PATH}/{archive_name}"),
],
remove_on_exit=True,
volume_list=[(volume_name_data, BACKUP_RESTORE_VOLUME_MOUNT_PATH)],
)
except docker.DockerError:
raise FatalError(f"unable to backup data volume [{volume_name_data}]")
print(f"Backup successfully created at [{archive_dir}/{archive_name}]")
def _backup_archive_cmd(file: str) -> str:
# command to a) create tar archive of data volume b) change ownership to current user
return (
f'/bin/tar -czf "{file}" -C "{BACKUP_RESTORE_VOLUME_MOUNT_PATH}" . && '
f'chown {os.getuid()}:{os.getgid()} "{file}"'
)
def restore(server_prefix: str, archive_dir: str, archive_name: str) -> None:
# confirm archive exists/is an archive and contains controller data files
_restore_verify_archive(f"{archive_dir}/{archive_name}")
# data archive considered valid
# confirm controller isn't currently running as we're rebuilding the data volume and it can't be in use
container_name = _container_server_name(server_prefix)
for name, data in docker.container_list():
if name == container_name and data["running"]:
raise FatalError(
f"container [{container_name}] currently running, "
"associated data volume must not be in use for restore"
)
# remove (possible) existing data volume
volume_name_data = _volume_data_name(server_prefix)
for name, _ in docker.volume_list():
if name != volume_name_data:
continue
# found existing volume - remove it
try:
docker.volume_delete(volume_name_data)
print(f"Removed existing data volume [{volume_name_data}]")
except docker.DockerError:
raise FatalError(
f"unable to remove existing data volume [{volume_name_data}]"
)
# confirm image used for restore exists, if not pull it and create new volume
_image_pull(BACKUP_REPOSITORY_NAME)
_volume_create(volume_name_data)
# restore archive into new data volume
try:
# execute backup of volume - using temporary container
docker.container_run(
image_repository=BACKUP_REPOSITORY_NAME,
bind_list=[(archive_dir, BACKUP_RESTORE_BACKUP_PATH)],
command_arg_list=[
"/bin/sh",
"-c",
_restore_archive_cmd(f"{BACKUP_RESTORE_BACKUP_PATH}/{archive_name}"),
],
remove_on_exit=True,
volume_list=[(volume_name_data, BACKUP_RESTORE_VOLUME_MOUNT_PATH)],
)
except docker.DockerError:
raise FatalError(f"unable to restore data volume [{volume_name_data}]")
print(f"Data volume successfully restored from [{archive_dir}/{archive_name}]")
def _restore_verify_archive(archive_path: str) -> None:
# open archive, confirm it's a tar file
try:
archive_tar = tarfile.open(archive_path, mode="r")
except OSError:
raise FatalError(f"unable to open archive [{archive_path}]")
except tarfile.TarError:
raise FatalError(f"it appears [{archive_path}] is not a tar file")
# analyse, confirming it contains key controller data files
key_file_count = 0
for tar_file in archive_tar.getmembers():
if tar_file.name in BACKUP_ARCHIVE_KEY_FILE_LIST:
key_file_count += 1
archive_tar.close()
if key_file_count < len(BACKUP_ARCHIVE_KEY_FILE_LIST):
# didn't find every file expected
raise FatalError(
f"archive [{archive_path}] doesn't appear to be a controller data backup"
)
# archive passed verification
def _restore_archive_cmd(file: str) -> str:
# command to a) move into the root of the Docker volume b) extract tar mounted at host into volume
return f'cd "{BACKUP_RESTORE_VOLUME_MOUNT_PATH}" && tar -xf "{file}"'
def _container_server_name(server_prefix: str) -> str:
return f"{server_prefix}-server"
def _volume_data_name(server_prefix: str) -> str:
return f"{server_prefix}-data"
def _volume_logs_name(server_prefix: str) -> str:
return f"{server_prefix}-logs"
def _image_pull(repository: str, tag: str = "latest") -> None:
image_name = f"{repository}:{tag}"
if image_name in dict(docker.image_list()):
# no work
return
print(f"Docker image [{image_name}] not available - attempting to pull")
try:
docker.image_pull(repository, tag)
except docker.DockerError:
raise FatalError(f"unable to pull [{image_name}]")
print("Successfully pulled Docker image")
def _volume_create(name: str, existing_volume_list: AbstractSet[str] = set()) -> None:
if name in existing_volume_list:
# no work
return
try:
docker.volume_create(name)
except docker.DockerError:
raise FatalError(f"unable to create volume [{name}]")
print(f"Created volume [{name}]")
class FatalError(Exception):
pass
| 34.501992 | 107 | 0.677714 |
import os
import tarfile
from typing import AbstractSet
from controllerlib import docker
CONTROLLER_REPOSITORY_NAME = "magnetikonline/unifi-network-controller"
CONTROLLER_PORT_COMMS = 8080
CONTROLLER_PORT_GUI = 8443
CONTROLLER_BASE_DIR = "/usr/lib/unifi"
BACKUP_REPOSITORY_NAME = "alpine"
BACKUP_RESTORE_BACKUP_PATH = "/backup"
BACKUP_RESTORE_VOLUME_MOUNT_PATH = "/data"
BACKUP_ARCHIVE_KEY_FILE_LIST = [
"./db/version",
"./db/WiredTiger",
"./firmware.json",
"./system.properties",
]
def start_server(image_tag: str, server_prefix: str, no_host_network: bool) -> None:
_image_pull(CONTROLLER_REPOSITORY_NAME, image_tag)
container_name = _container_server_name(server_prefix)
if container_name in dict(docker.container_list()):
raise FatalError(f"container [{container_name}] already exists")
volume_name_data = _volume_data_name(server_prefix)
volume_name_logs = _volume_logs_name(server_prefix)
volume_list = dict(docker.volume_list()).keys()
_volume_create(volume_name_data, volume_list)
_volume_create(volume_name_logs, volume_list)
print(
f"Starting server [{CONTROLLER_REPOSITORY_NAME}:{image_tag}] as [{container_name}]"
)
try:
container_id = docker.container_run(
image_repository=CONTROLLER_REPOSITORY_NAME,
image_tag=image_tag,
detach=True,
name=container_name,
network_host=not no_host_network,
publish_list=[
(CONTROLLER_PORT_COMMS, CONTROLLER_PORT_COMMS),
(CONTROLLER_PORT_GUI, CONTROLLER_PORT_GUI),
],
remove_on_exit=True,
volume_list=[
(volume_name_data, f"{CONTROLLER_BASE_DIR}/data"),
(volume_name_logs, f"{CONTROLLER_BASE_DIR}/logs"),
],
)
except docker.DockerError:
raise FatalError("unable to start server")
print(f"Running as container ID [{container_id}]")
def stop_server(server_prefix: str) -> None:
container_name = _container_server_name(server_prefix)
container_list = dict(docker.container_list())
if container_name not in container_list:
raise FatalError(f"container [{container_name}] does not exist")
if not container_list[container_name]["running"]:
raise FatalError(f"container [{container_name}] not running")
print(f"Stopping server [{container_name}]")
try:
docker.container_stop(container_name)
except docker.DockerError:
raise FatalError("unable to stop server")
print("Server has stopped")
def backup(server_prefix: str, archive_dir: str, archive_name: str) -> None:
volume_name_data = _volume_data_name(server_prefix)
if volume_name_data not in dict(docker.volume_list()):
raise FatalError(f"data volume [{volume_name_data}] does not exist for backup")
_image_pull(BACKUP_REPOSITORY_NAME)
try:
docker.container_run(
image_repository=BACKUP_REPOSITORY_NAME,
bind_list=[(archive_dir, BACKUP_RESTORE_BACKUP_PATH)],
command_arg_list=[
"/bin/sh",
"-c",
_backup_archive_cmd(f"{BACKUP_RESTORE_BACKUP_PATH}/{archive_name}"),
],
remove_on_exit=True,
volume_list=[(volume_name_data, BACKUP_RESTORE_VOLUME_MOUNT_PATH)],
)
except docker.DockerError:
raise FatalError(f"unable to backup data volume [{volume_name_data}]")
print(f"Backup successfully created at [{archive_dir}/{archive_name}]")
def _backup_archive_cmd(file: str) -> str:
return (
f'/bin/tar -czf "{file}" -C "{BACKUP_RESTORE_VOLUME_MOUNT_PATH}" . && '
f'chown {os.getuid()}:{os.getgid()} "{file}"'
)
def restore(server_prefix: str, archive_dir: str, archive_name: str) -> None:
_restore_verify_archive(f"{archive_dir}/{archive_name}")
container_name = _container_server_name(server_prefix)
for name, data in docker.container_list():
if name == container_name and data["running"]:
raise FatalError(
f"container [{container_name}] currently running, "
"associated data volume must not be in use for restore"
)
# remove (possible) existing data volume
volume_name_data = _volume_data_name(server_prefix)
for name, _ in docker.volume_list():
if name != volume_name_data:
continue
# found existing volume - remove it
try:
docker.volume_delete(volume_name_data)
print(f"Removed existing data volume [{volume_name_data}]")
except docker.DockerError:
raise FatalError(
f"unable to remove existing data volume [{volume_name_data}]"
)
# confirm image used for restore exists, if not pull it and create new volume
_image_pull(BACKUP_REPOSITORY_NAME)
_volume_create(volume_name_data)
# restore archive into new data volume
try:
# execute backup of volume - using temporary container
docker.container_run(
image_repository=BACKUP_REPOSITORY_NAME,
bind_list=[(archive_dir, BACKUP_RESTORE_BACKUP_PATH)],
command_arg_list=[
"/bin/sh",
"-c",
_restore_archive_cmd(f"{BACKUP_RESTORE_BACKUP_PATH}/{archive_name}"),
],
remove_on_exit=True,
volume_list=[(volume_name_data, BACKUP_RESTORE_VOLUME_MOUNT_PATH)],
)
except docker.DockerError:
raise FatalError(f"unable to restore data volume [{volume_name_data}]")
print(f"Data volume successfully restored from [{archive_dir}/{archive_name}]")
def _restore_verify_archive(archive_path: str) -> None:
# open archive, confirm it's a tar file
try:
archive_tar = tarfile.open(archive_path, mode="r")
except OSError:
raise FatalError(f"unable to open archive [{archive_path}]")
except tarfile.TarError:
raise FatalError(f"it appears [{archive_path}] is not a tar file")
key_file_count = 0
for tar_file in archive_tar.getmembers():
if tar_file.name in BACKUP_ARCHIVE_KEY_FILE_LIST:
key_file_count += 1
archive_tar.close()
if key_file_count < len(BACKUP_ARCHIVE_KEY_FILE_LIST):
raise FatalError(
f"archive [{archive_path}] doesn't appear to be a controller data backup"
)
def _restore_archive_cmd(file: str) -> str:
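    # unpack the archive from inside the volume mount point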
return f'cd "{BACKUP_RESTORE_VOLUME_MOUNT_PATH}" && tar -xf "{file}"'
def _container_server_name(server_prefix: str) -> str:
return f"{server_prefix}-server"
def _volume_data_name(server_prefix: str) -> str:
return f"{server_prefix}-data"
def _volume_logs_name(server_prefix: str) -> str:
return f"{server_prefix}-logs"
def _image_pull(repository: str, tag: str = "latest") -> None:
image_name = f"{repository}:{tag}"
if image_name in dict(docker.image_list()):
return
print(f"Docker image [{image_name}] not available - attempting to pull")
try:
docker.image_pull(repository, tag)
except docker.DockerError:
raise FatalError(f"unable to pull [{image_name}]")
print("Successfully pulled Docker image")
def _volume_create(name: str, existing_volume_list: AbstractSet[str] = set()) -> None:
if name in existing_volume_list:
return
try:
docker.volume_create(name)
except docker.DockerError:
raise FatalError(f"unable to create volume [{name}]")
print(f"Created volume [{name}]")
class FatalError(Exception):
pass
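# --- hypothetical usage sketch (added for illustration; not in the original module) ---
# Minimal driver for the helpers above. "demo" is an assumed server prefix and
# the archive location is a placeholder; failures surface as FatalError.
def _example_maintenance() -> None:
    backup("demo", "/tmp/backups", "controller-data.tar.gz")
    stop_server("demo")
    restore("demo", "/tmp/backups", "controller-data.tar.gz")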
# --- homeassistant/components/modbus/validators.py | gadgetmobile/homeassistant-core | Apache-2.0 ---
"""Validate Modbus configuration."""
from __future__ import annotations
import logging
import struct
from typing import Any
import voluptuous as vol
from homeassistant.const import (
CONF_COUNT,
CONF_NAME,
CONF_SCAN_INTERVAL,
CONF_STRUCTURE,
CONF_TIMEOUT,
)
from .const import (
CONF_DATA_TYPE,
CONF_SWAP,
CONF_SWAP_BYTE,
CONF_SWAP_NONE,
DATA_TYPE_CUSTOM,
DATA_TYPE_FLOAT,
DATA_TYPE_FLOAT16,
DATA_TYPE_FLOAT32,
DATA_TYPE_FLOAT64,
DATA_TYPE_INT,
DATA_TYPE_INT16,
DATA_TYPE_INT32,
DATA_TYPE_INT64,
DATA_TYPE_UINT,
DATA_TYPE_UINT16,
DATA_TYPE_UINT32,
DATA_TYPE_UINT64,
DEFAULT_SCAN_INTERVAL,
DEFAULT_STRUCT_FORMAT,
PLATFORMS,
)
_LOGGER = logging.getLogger(__name__)
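# Maps a legacy generic data type and register count to its explicit-width replacement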
OLD_DATA_TYPES = {
DATA_TYPE_INT: {
1: DATA_TYPE_INT16,
2: DATA_TYPE_INT32,
4: DATA_TYPE_INT64,
},
DATA_TYPE_UINT: {
1: DATA_TYPE_UINT16,
2: DATA_TYPE_UINT32,
4: DATA_TYPE_UINT64,
},
DATA_TYPE_FLOAT: {
1: DATA_TYPE_FLOAT16,
2: DATA_TYPE_FLOAT32,
4: DATA_TYPE_FLOAT64,
},
}
def struct_validator(config):
"""Sensor schema validator."""
data_type = config[CONF_DATA_TYPE]
count = config.get(CONF_COUNT, 1)
name = config[CONF_NAME]
structure = config.get(CONF_STRUCTURE)
swap_type = config.get(CONF_SWAP)
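    # convert deprecated generic data types to their explicit-width equivalents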
if data_type in [DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT]:
error = f"{name} with {data_type} is not valid, trying to convert"
_LOGGER.warning(error)
try:
data_type = OLD_DATA_TYPES[data_type][config.get(CONF_COUNT, 1)]
config[CONF_DATA_TYPE] = data_type
except KeyError as exp:
error = f"{name} cannot convert automatically {data_type}"
raise vol.Invalid(error) from exp
if config[CONF_DATA_TYPE] != DATA_TYPE_CUSTOM:
if structure:
error = f"{name} structure: cannot be mixed with {data_type}"
raise vol.Invalid(error)
structure = f">{DEFAULT_STRUCT_FORMAT[data_type][0]}"
if CONF_COUNT not in config:
config[CONF_COUNT] = DEFAULT_STRUCT_FORMAT[data_type][1]
else:
if not structure:
error = (
f"Error in sensor {name}. The `{CONF_STRUCTURE}` field can not be empty"
)
raise vol.Invalid(error)
try:
size = struct.calcsize(structure)
except struct.error as err:
raise vol.Invalid(f"Error in {name} structure: {str(err)}") from err
count = config.get(CONF_COUNT, 1)
bytecount = count * 2
if bytecount != size:
raise vol.Invalid(
f"Structure request {size} bytes, "
f"but {count} registers have a size of {bytecount} bytes"
)
if swap_type != CONF_SWAP_NONE:
if swap_type == CONF_SWAP_BYTE:
regs_needed = 1
else: # CONF_SWAP_WORD_BYTE, CONF_SWAP_WORD
regs_needed = 2
if count < regs_needed or (count % regs_needed) != 0:
raise vol.Invalid(
f"Error in sensor {name} swap({swap_type}) "
f"not possible due to the registers "
f"count: {count}, needed: {regs_needed}"
)
return {
**config,
CONF_STRUCTURE: structure,
CONF_SWAP: swap_type,
}
def number_validator(value: Any) -> int | float:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, float):
return value
try:
value = int(value)
return value
except (TypeError, ValueError):
pass
try:
value = float(value)
return value
except (TypeError, ValueError) as err:
raise vol.Invalid(f"invalid number {value}") from err
def scan_interval_validator(config: dict) -> dict:
"""Control scan_interval."""
for hub in config:
minimum_scan_interval = DEFAULT_SCAN_INTERVAL
for component, conf_key in PLATFORMS:
if conf_key not in hub:
continue
for entry in hub[conf_key]:
scan_interval = entry.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
if scan_interval == 0:
continue
if scan_interval < 5:
_LOGGER.warning(
"%s %s scan_interval(%d) is lower than 5 seconds, "
"which may cause Home Assistant stability issues",
component,
entry.get(CONF_NAME),
scan_interval,
)
entry[CONF_SCAN_INTERVAL] = scan_interval
minimum_scan_interval = min(scan_interval, minimum_scan_interval)
if (
CONF_TIMEOUT in hub
and hub[CONF_TIMEOUT] > minimum_scan_interval - 1
and minimum_scan_interval > 1
):
_LOGGER.warning(
"Modbus %s timeout(%d) is adjusted(%d) due to scan_interval",
hub.get(CONF_NAME, ""),
hub[CONF_TIMEOUT],
minimum_scan_interval - 1,
)
hub[CONF_TIMEOUT] = minimum_scan_interval - 1
return config
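# --- hypothetical usage sketch (added for illustration; not in the original module) ---
# Shape of a sensor config the validator accepts; the concrete struct letter
# comes from DEFAULT_STRUCT_FORMAT, so only the key/value choices are assumed.
_EXAMPLE_SENSOR_CONFIG = {
    CONF_NAME: "demo_sensor",
    CONF_DATA_TYPE: DATA_TYPE_UINT16,
    CONF_SWAP: CONF_SWAP_NONE,
}
# struct_validator(_EXAMPLE_SENSOR_CONFIG) fills CONF_STRUCTURE with
# ">" + DEFAULT_STRUCT_FORMAT[DATA_TYPE_UINT16][0] and defaults CONF_COUNT.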
# --- camera.py | andreajessen/robot | MIT ---
import os
from PIL import Image
class Camera:
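    # Thin raspistill wrapper: update() captures a frame and caches it as a PIL RGB image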
def __init__(self, img_width=128, img_height=96, img_rot=0):
self.value = None
self.img_width = img_width
self.img_height = img_height
self.img_rot = img_rot
def get_value(self):
return self.value
def update(self):
self.sensor_get_value()
return self.value
def reset(self):
self.value = None
def sensor_get_value(self):
        # This is an OS call that takes an image and makes it accessible to PIL operations in the same directory
os.system('raspistill -t 1 -o image.png -w "' + str(self.img_width) + '" -h "' + str(self.img_height) + '" -rot "' + str(self.img_rot) + '"')
# Open the image just taken by raspicam
# Stores the RGB array in the value field
self.value = Image.open('image.png').convert('RGB')
# Just testing the camera in python
# os.system('raspistill -t 1 -o image.png -w "' + str(200) + '" -h "' + str(200) + '" -rot "' + str(0) + '"')
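# --- hypothetical usage sketch (added for illustration; not in the original module) ---
# Assumes a Raspberry Pi with raspistill available on the PATH:
# cam = Camera(img_width=128, img_height=96)
# frame = cam.update()  # captures image.png and returns it as a PIL RGB image
# cam.reset()           # drops the cached frame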
# --- tests/test_enum.py | gkpln3/Hydras | MIT ---
#!/usr/bin/env python
"""
:file: EnumsTests.py
This file contains tests for the "Enum" type formatters.
:date: 20/01/2016
:authors:
- Kfir Gollan
"""
import unittest
from hydras import *
class TestEnumFormatters(unittest.TestCase):
def setUp(self):
HydraSettings.push()
HydraSettings.endian = LittleEndian
self.enum_params = ["A", {"A": 1, "B": 2}]
def tearDown(self):
HydraSettings.pop()
def test_get_attributes(self):
enum = Enum(*self.enum_params)
self.assertEqual(1, enum.A)
self.assertEqual(2, enum.B)
with self.assertRaises(AttributeError):
a = enum.C
def test_validate(self):
enum = Enum(*self.enum_params)
self.assertFalse(enum.validate(4))
self.assertTrue(enum.validate(1))
self.assertTrue(enum.validate(enum.B))
def test_format_parse_len(self):
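        # each entry pairs a backing type with its expected little/big-endian bytes and length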
test_set = [{
"class": UInt8,
"value": 1,
"little": b"\x01",
"big": b"\x01",
"len": 1
},
{
"class": UInt16,
"value": 1,
"little": b"\x01\x00",
"big": b"\x00\x01",
"len": 2
},
{
"class": UInt32,
"value": 1,
"little": b"\x01\x00\x00\x00",
"big": b"\x00\x00\x00\x01",
"len": 4
},
{
"class": Int64,
"value": 1,
"little": b"\x01\x00\x00\x00\x00\x00\x00\x00",
"big": b"\x00\x00\x00\x00\x00\x00\x00\x01",
"len": 8
},
{
"class": Int8,
"value": 1,
"little": b"\x01",
"big": b"\x01",
"len": 1
},
{
"class": Int8,
"value": -2,
"little": b"\xfe",
"big": b"\xfe",
"len": 1
},
{
"class": Int16,
"value": 1,
"little": b"\x01\x00",
"big": b"\x00\x01",
"len": 2
},
{
"class": Int16,
"value": -2,
"little": b"\xfe\xff",
"big": b"\xff\xfe",
"len": 2
},
{
"class": Int32,
"value": 1,
"little": b"\x01\x00\x00\x00",
"big": b"\x00\x00\x00\x01",
"len": 4
},
{
"class": Int32,
"value": -2,
"little": b"\xfe\xff\xff\xff",
"big": b"\xff\xff\xff\xfe",
"len": 4
},
{
"class": Int64,
"value": 1,
"little": b"\x01\x00\x00\x00\x00\x00\x00\x00",
"big": b"\x00\x00\x00\x00\x00\x00\x00\x01",
"len": 8
},
{
"class": Int64,
"value": -2,
"little": b"\xfe\xff\xff\xff\xff\xff\xff\xff",
"big": b"\xff\xff\xff\xff\xff\xff\xff\xfe",
"len": 8
}
]
for test in test_set:
enum = Enum(*self.enum_params, format_type=test["class"])
# Little endian
self.assertEqual(enum.format(test["value"], {"endian": base.LittleEndian}), test["little"])
self.assertEqual(enum.parse(test["little"], {"endian": base.LittleEndian}), test["value"])
# Big endian
self.assertEqual(enum.format(test["value"], {"endian": base.BigEndian}), test["big"])
self.assertEqual(enum.parse(test["big"], {"endian": base.BigEndian}), test["value"])
# Length
self.assertEqual(len(enum), test["len"])
if __name__ == '__main__':
    unittest.main()
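# --- hypothetical usage note (added for illustration; not in the original tests) ---
# Per the table above, an Enum backed by UInt16 is two bytes wide:
# Enum("A", {"A": 1, "B": 2}, format_type=UInt16).format(1, {"endian": base.LittleEndian})
# yields b"\x01\x00", while the big-endian encoding is b"\x00\x01".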
# --- consensus/poet/core/sawtooth_poet/poet_consensus/poet_block_verifier.py | lcarranco/sawtooth-core | Apache-2.0 ---
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sawtooth_validator.journal.block_wrapper import BlockWrapper
from sawtooth_validator.journal.consensus.consensus \
import BlockVerifierInterface
from sawtooth_poet.poet_consensus.consensus_state_store \
import ConsensusStateStore
from sawtooth_poet.poet_consensus.poet_config_view import PoetConfigView
from sawtooth_poet.poet_consensus import poet_enclave_factory as factory
from sawtooth_poet.poet_consensus import utils
from sawtooth_poet.poet_consensus.wait_timer import WaitTimer
from sawtooth_poet_common.validator_registry_view.validator_registry_view \
import ValidatorRegistryView
LOGGER = logging.getLogger(__name__)
class PoetBlockVerifier(BlockVerifierInterface):
"""BlockVerifier provides services for the Journal(ChainController) to
determine if a block is valid (for the consensus rules) to be
considered as part of the fork being evaluated. BlockVerifier must be
independent of block publishing activities.
"""
def __init__(self,
block_cache,
state_view_factory,
data_dir,
validator_id):
"""Initialize the object, is passed (read-only) state access objects.
Args:
block_cache (BlockCache): Dict interface to the block cache.
Any predecessor block to blocks handed to this object will
be present in this dict.
state_view_factory (StateViewFactory): A factory that can be
used to create read-only views of state for a particular
merkle root, in particular the state as it existed when a
particular block was the chain head.
data_dir (str): path to location where persistent data for the
consensus module can be stored.
validator_id (str): A unique ID for this validator
Returns:
none.
"""
super().__init__(
block_cache,
state_view_factory,
data_dir,
validator_id)
self._block_cache = block_cache
self._state_view_factory = state_view_factory
self._data_dir = data_dir
self._validator_id = validator_id
self._consensus_state_store = \
ConsensusStateStore(
data_dir=self._data_dir,
validator_id=self._validator_id)
def verify_block(self, block_wrapper):
"""Check that the block received conforms to the consensus rules.
Args:
block_wrapper (BlockWrapper): The block to validate.
Returns:
Boolean: True if the Block is valid, False if the block is invalid.
"""
# Get the state view for the previous block in the chain so we can
# create a PoET enclave and validator registry view
previous_block = None
try:
previous_block = \
self._block_cache[block_wrapper.previous_block_id]
except KeyError:
pass
state_view = \
BlockWrapper.state_view_for_block(
block_wrapper=previous_block,
state_view_factory=self._state_view_factory)
poet_enclave_module = \
factory.PoetEnclaveFactory.get_poet_enclave_module(state_view)
validator_registry_view = ValidatorRegistryView(state_view)
try:
# Grab the validator info based upon the block signer's public
# key
try:
validator_info = \
validator_registry_view.get_validator_info(
block_wrapper.header.signer_pubkey)
except KeyError:
raise \
ValueError(
'Received block from an unregistered validator '
'{}...{}'.format(
block_wrapper.header.signer_pubkey[:8],
block_wrapper.header.signer_pubkey[-8:]))
LOGGER.debug(
'Block Signer Name=%s, ID=%s...%s, PoET public key='
'%s...%s',
validator_info.name,
validator_info.id[:8],
validator_info.id[-8:],
validator_info.signup_info.poet_public_key[:8],
validator_info.signup_info.poet_public_key[-8:])
# Create a list of certificates leading up to this block.
# This seems to have a little too much knowledge of the
# WaitTimer implementation, but there is no use getting more
# than WaitTimer.certificate_sample_length wait certificates.
certificates = \
utils.build_certificate_list(
block_header=block_wrapper.header,
block_cache=self._block_cache,
poet_enclave_module=poet_enclave_module,
maximum_number=WaitTimer.certificate_sample_length)
# For the candidate block, reconstitute the wait certificate
# and verify that it is valid
wait_certificate = \
utils.deserialize_wait_certificate(
block=block_wrapper,
poet_enclave_module=poet_enclave_module)
if wait_certificate is None:
raise \
ValueError(
'Being asked to verify a block that was not '
'created by PoET consensus module')
poet_public_key = \
validator_info.signup_info.poet_public_key
wait_certificate.check_valid(
poet_enclave_module=poet_enclave_module,
certificates=certificates,
poet_public_key=poet_public_key)
# Get the consensus state for the block that is being built
# upon, fetch the validator state for this validator, and then
            # see if that validator has already claimed the key block limit
# for its current PoET key pair. If so, then we reject the
# block.
consensus_state = \
utils.get_consensus_state_for_block_id(
block_id=block_wrapper.previous_block_id,
block_cache=self._block_cache,
state_view_factory=self._state_view_factory,
consensus_state_store=self._consensus_state_store,
poet_enclave_module=poet_enclave_module)
validator_state = \
utils.get_current_validator_state(
validator_info=validator_info,
consensus_state=consensus_state,
block_cache=self._block_cache)
poet_config_view = PoetConfigView(state_view=state_view)
if validator_state.poet_public_key == poet_public_key and \
validator_state.key_block_claim_count >= \
poet_config_view.key_block_claim_limit:
raise \
ValueError(
'Validator {} has already reached claim block limit '
'for current PoET key pair: {} >= {}'.format(
validator_info.name,
validator_state.key_block_claim_count,
poet_config_view.key_block_claim_limit))
# While having a block claim delay is nice, it turns out that in
# practice the claim delay should not be more than one less than
# the number of validators. It helps to imagine the scenario
# where each validator hits their block claim limit in sequential
# blocks and their new validator registry information is updated
# in the following block by another validator, assuming that there
# were no forks. If there are N validators, once all N validators
# have updated their validator registry information, there will
# have been N-1 block commits and the Nth validator will only be
# able to get its updated validator registry information updated
# if the first validator that kicked this off is now able to claim
# a block. If the block claim delay was greater than or equal to
# the number of validators, at this point no validators would be
# able to claim a block.
number_of_validators = \
len(validator_registry_view.get_validators())
block_claim_delay = \
min(
poet_config_view.block_claim_delay,
number_of_validators - 1)
# While a validator network is starting up, we need to be careful
# about applying the block claim delay because if we are too
# aggressive we will get ourselves into a situation where the
# block claim delay will prevent any validators from claiming
# blocks. So, until we get at least block_claim_delay blocks
# we are going to choose not to enforce the delay.
if consensus_state.total_block_claim_count <= block_claim_delay:
LOGGER.debug(
'Skipping block claim delay check. Only %d block(s) in '
'the chain. Claim delay is %d block(s). %d validator(s) '
'registered.',
consensus_state.total_block_claim_count,
block_claim_delay,
number_of_validators)
return True
blocks_since_registration = \
block_wrapper.block_num - \
validator_state.commit_block_number - 1
if block_claim_delay > blocks_since_registration:
raise \
ValueError(
'Validator {} claiming too early. Block: {}, '
'registered in: {}, wait until after: {}.'.format(
validator_info.name,
block_wrapper.block_num,
validator_state.commit_block_number,
validator_state.commit_block_number +
block_claim_delay))
LOGGER.debug(
'%d block(s) claimed since %s was registered and block '
'claim delay is %d block(s). Check passed.',
blocks_since_registration,
validator_info.name,
block_claim_delay)
except ValueError as error:
LOGGER.error('Failed to verify block: %s', error)
return False
return True
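# --- hypothetical worked example (added for illustration; not in the original module) ---
# With 5 registered validators and a configured block_claim_delay of 10, the
# effective delay is min(10, 5 - 1) == 4 blocks, so a validator whose
# registration committed at block 100 may claim again starting at block 105
# (the "wait until after" bound reported above would be 104).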
# --- pypureclient/flasharray/FA_2_13/api/api_clients_api.py | ashahid-ps/py-pure-client | BSD-2-Clause ---
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class APIClientsApi(object):
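    # swagger-codegen-generated wrapper for the /api/2.13/api-clients endpoints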
def __init__(self, api_client):
self.api_client = api_client
def api213_api_clients_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete an API client
        Deletes an API client. The `ids` or `names` parameter is required, but the two cannot be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api213_api_clients_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_api_clients_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ApiClientGetResponse
"""List API clients
Returns a list of API clients.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api213_api_clients_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ApiClientGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api213_api_clients_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api213_api_clients_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiClientGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_api_clients_patch_with_http_info(
self,
api_clients=None, # type: models.ApiClientPatch
authorization=None, # type: str
x_request_id=None, # type: str
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ApiClientResponse
"""Manage an API client
        Enables or disables an API client. The `ids` or `names` parameter is required, but the two cannot be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api213_api_clients_patch_with_http_info(api_clients, async_req=True)
>>> result = thread.get()
:param ApiClientPatch api_clients: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ApiClientResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'api_clients' is set
if api_clients is None:
raise TypeError("Missing the required parameter `api_clients` when calling `api213_api_clients_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'api_clients' in params:
body_params = params['api_clients']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiClientResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_api_clients_post_with_http_info(
self,
api_clients=None, # type: models.ApiClientPost
authorization=None, # type: str
x_request_id=None, # type: str
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ApiClientResponse
"""Create an API client
Creates an API client. Newly created API clients are disabled by default. Enable an API client through the `PATCH` method. The `names`, `max_role`, `issuer`, and `public_key` parameters are required.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api213_api_clients_post_with_http_info(api_clients, async_req=True)
>>> result = thread.get()
:param ApiClientPost api_clients: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ApiClientResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'api_clients' is set
if api_clients is None:
raise TypeError("Missing the required parameter `api_clients` when calling `api213_api_clients_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'api_clients' in params:
body_params = params['api_clients']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiClientResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
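# --- hypothetical usage sketch (added for illustration; not in the generated client) ---
# `client` is an assumed, already-configured ApiClient instance:
# api = APIClientsApi(client)
# resp = api.api213_api_clients_get_with_http_info(limit=10)
# api.api213_api_clients_patch_with_http_info(patch_body, names=["app1"])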
| 45.76009 | 671 | 0.634377 |
from __future__ import absolute_import
import re
import six
from typing import List, Optional
from .. import models
class APIClientsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api213_api_clients_delete_with_http_info(
self,
authorization=None,
x_request_id=None,
ids=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_api_clients_get_with_http_info(
self,
authorization=None,
x_request_id=None,
filter=None,
ids=None,
limit=None,
names=None,
offset=None,
sort=None,
total_item_count=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api213_api_clients_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api213_api_clients_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiClientGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_api_clients_patch_with_http_info(
self,
api_clients=None,
authorization=None,
x_request_id=None,
ids=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if api_clients is None:
raise TypeError("Missing the required parameter `api_clients` when calling `api213_api_clients_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'api_clients' in params:
body_params = params['api_clients']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiClientResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api213_api_clients_post_with_http_info(
self,
api_clients=None,
authorization=None,
x_request_id=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if api_clients is None:
raise TypeError("Missing the required parameter `api_clients` when calling `api213_api_clients_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'api_clients' in params:
body_params = params['api_clients']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/2.13/api-clients', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiClientResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
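# Usage sketch, assuming a `client` instance of this generated class
# (argument names match the signatures above; list params serialize as csv):
# resp = client.api213_api_clients_patch_with_http_info(
#     api_clients=patch_body,      # required; omitting it raises TypeError
#     names='my-client',           # a scalar is wrapped into a one-item list
#     _return_http_data_only=True,
# )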
| true | true |
f7fdb110597e8701a229c068eb8c48bc251019cd | 952 | py | Python | tests/functional/branching/black_box_with_y.py | satyaog/orion | 671dbb3fb51bf8f49f0146101b749f70166896a6 | [
"BSD-3-Clause"
] | 1 | 2017-09-07T06:20:39.000Z | 2017-09-07T06:20:39.000Z | tests/functional/branching/black_box_with_y.py | satyaog/orion | 671dbb3fb51bf8f49f0146101b749f70166896a6 | [
"BSD-3-Clause"
] | null | null | null | tests/functional/branching/black_box_with_y.py | satyaog/orion | 671dbb3fb51bf8f49f0146101b749f70166896a6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple one dimensional example for a possible user's script."""
import argparse
from orion.client import report_results
def function(x, y):
"""Evaluate partial information of a quadratic."""
y = x + y - 34.56789
return 4 * y**2 + 23.4, 8 * y
def execute():
"""Execute a simple pipeline as an example."""
# 1. Receive inputs as you want
parser = argparse.ArgumentParser()
parser.add_argument("-x", type=float, required=True)
parser.add_argument("-y", type=float, default=0.0)
inputs = parser.parse_args()
# 2. Perform computations
y, dy = function(inputs.x, inputs.y)
# 3. Gather and report results
results = list()
results.append(dict(name="example_objective", type="objective", value=y))
results.append(dict(name="example_gradient", type="gradient", value=[dy]))
report_results(results)
if __name__ == "__main__":
execute()
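# Worked example: with -x 34.56789 and the default y of 0.0, function()
# computes y = 34.56789 + 0.0 - 34.56789 = 0.0, so it reports the
# objective 4 * 0**2 + 23.4 = 23.4 (the minimum) and the gradient 8 * 0 = 0.0.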
| 26.444444 | 78 | 0.661765 |
import argparse
from orion.client import report_results
def function(x, y):
y = x + y - 34.56789
return 4 * y**2 + 23.4, 8 * y
def execute():
parser = argparse.ArgumentParser()
parser.add_argument("-x", type=float, required=True)
parser.add_argument("-y", type=float, default=0.0)
inputs = parser.parse_args()
y, dy = function(inputs.x, inputs.y)
results = list()
results.append(dict(name="example_objective", type="objective", value=y))
results.append(dict(name="example_gradient", type="gradient", value=[dy]))
report_results(results)
if __name__ == "__main__":
execute()
| true | true |
f7fdb208baa0390cfac7fe73b1618af8b1312615 | 5,780 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/module_utils/network/aos/aos.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/module_utils/network/aos/aos.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/module_utils/network/aos/aos.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #
# Copyright (c) 2017 Apstra Inc, <community@apstra.com>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Apstra AOS modules.
To use this module, include it as part of your module:
from ansible.module_utils.network.aos.aos import (check_aos_version, get_aos_session, find_collection_item,
content_to_dict, do_load_resource)
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from distutils.version import LooseVersion
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils._text import to_native
def check_aos_version(module, min=False):
"""
Check if the library aos-pyez is present.
If provided, also check if the minimum version requirement is met
"""
if not HAS_AOS_PYEZ:
module.fail_json(msg='aos-pyez is not installed. Please see details '
'here: https://github.com/Apstra/aos-pyez')
elif min:
import apstra.aosom
AOS_PYEZ_VERSION = apstra.aosom.__version__
if LooseVersion(AOS_PYEZ_VERSION) < LooseVersion(min):
module.fail_json(msg='aos-pyez >= %s is required for this module' % min)
return True
def get_aos_session(module, auth):
"""
Resume an existing session and return an AOS object.
Args:
auth (dict): An AOS session as obtained by aos_login module blocks::
dict( token=<token>,
server=<ip>,
port=<port>
)
Return:
Aos object
"""
check_aos_version(module)
aos = Session()
aos.session = auth
return aos
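# Usage sketch, with illustrative placeholder values for the auth dict
# described above (as produced by the aos_login module):
# auth = dict(token='<token>', server='192.168.59.250', port=443)
# aos = get_aos_session(module, auth)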
def find_collection_item(collection, item_name=False, item_id=False):
"""
Find a collection item by name or id in a collection object.
Both collection items and collection objects are provided by the aos-pyez library.
Return:
collection_item: object corresponding to the collection type
"""
my_dict = None
if item_name:
my_dict = collection.find(label=item_name)
elif item_id:
my_dict = collection.find(uid=item_id)
if my_dict is None:
return collection['']
else:
return my_dict
def content_to_dict(module, content):
"""
Convert 'content' into a Python dict (the content is parsed as YAML)
"""
# if not HAS_YAML:
# module.fail_json(msg="Python Library Yaml is not present, mandatory to use 'content'")
content_dict = None
# try:
# content_dict = json.loads(content.replace("\'", '"'))
# except:
# module.fail_json(msg="Unable to convert 'content' from JSON, please check if valid")
#
# elif format in ['yaml', 'var']:
try:
content_dict = yaml.safe_load(content)
if not isinstance(content_dict, dict):
raise Exception()
# Check if the dict is empty and raise an error if it is
if not content_dict:
raise Exception()
except Exception:
module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")
# replace the string with the dict
module.params['content'] = content_dict
return content_dict
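# Sketch: YAML (and JSON, a YAML subset) strings parse to dicts, e.g.
# content_to_dict(module, "name: leaf\nports: 48") -> {'name': 'leaf', 'ports': 48};
# a non-dict or empty result (e.g. "[1, 2]" or "") triggers fail_json.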
def do_load_resource(module, collection, name):
"""
Create a new object (collection.item) by loading a data structure directly
"""
try:
item = find_collection_item(collection, name, '')
except Exception:
module.fail_json(msg="An error occurred while running 'find_collection_item'")
if item.exists:
module.exit_json(changed=False, name=item.name, id=item.id, value=item.value)
# If not in check mode, apply the changes
if not module.check_mode:
try:
item.datum = module.params['content']
item.write()
except Exception as e:
module.fail_json(msg="Unable to write item content : %r" % to_native(e))
module.exit_json(changed=True, name=item.name, id=item.id, value=item.value)
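# Flow sketch, given a collection from the aos-pyez session (hypothetical
# example: aos.IpPools) and an item name:
# - the item already exists -> exit_json(changed=False, ...)
# - a new item (and not check mode) -> write module.params['content'],
#   then exit_json(changed=True, ...)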
| 31.413043 | 107 | 0.686851 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from distutils.version import LooseVersion
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils._text import to_native
def check_aos_version(module, min=False):
if not HAS_AOS_PYEZ:
module.fail_json(msg='aos-pyez is not installed. Please see details '
'here: https://github.com/Apstra/aos-pyez')
elif min:
import apstra.aosom
AOS_PYEZ_VERSION = apstra.aosom.__version__
if LooseVersion(AOS_PYEZ_VERSION) < LooseVersion(min):
module.fail_json(msg='aos-pyez >= %s is required for this module' % min)
return True
def get_aos_session(module, auth):
check_aos_version(module)
aos = Session()
aos.session = auth
return aos
def find_collection_item(collection, item_name=False, item_id=False):
my_dict = None
if item_name:
my_dict = collection.find(label=item_name)
elif item_id:
my_dict = collection.find(uid=item_id)
if my_dict is None:
return collection['']
else:
return my_dict
def content_to_dict(module, content):
content_dict = None
# except:
# module.fail_json(msg="Unable to convert 'content' from JSON, please check if valid")
#
# elif format in ['yaml', 'var']:
try:
content_dict = yaml.safe_load(content)
if not isinstance(content_dict, dict):
raise Exception()
# Check if the dict is empty and raise an error if it is
if not content_dict:
raise Exception()
except Exception:
module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")
# replace the string with the dict
module.params['content'] = content_dict
return content_dict
def do_load_resource(module, collection, name):
try:
item = find_collection_item(collection, name, '')
except Exception:
module.fail_json(msg="An error occurred while running 'find_collection_item'")
if item.exists:
module.exit_json(changed=False, name=item.name, id=item.id, value=item.value)
# If not in check mode, apply the changes
if not module.check_mode:
try:
item.datum = module.params['content']
item.write()
except Exception as e:
module.fail_json(msg="Unable to write item content : %r" % to_native(e))
module.exit_json(changed=True, name=item.name, id=item.id, value=item.value)
| true | true |
f7fdb318437a941360b8499d5a8e3f2e59604c63 | 7,565 | py | Python | ALREC_Method/stmarc/train_new_method_v4_for_atd.py | proy3/Abnormal_Trajectory_Classifier | a6b27c6847262e9703a0f3404c85c135415c1d4c | [
"MIT"
] | 6 | 2019-10-29T03:05:14.000Z | 2022-03-18T05:14:25.000Z | ALREC_Method/rene/train_new_method_v4_for_atd.py | proy3/Abnormal_Trajectory_Classifier | a6b27c6847262e9703a0f3404c85c135415c1d4c | [
"MIT"
] | 1 | 2022-03-11T03:49:34.000Z | 2022-03-11T03:49:34.000Z | ALREC_Method/rouen/train_new_method_v4_for_atd.py | proy3/Abnormal_Trajectory_Classifier | a6b27c6847262e9703a0f3404c85c135415c1d4c | [
"MIT"
] | 1 | 2021-12-15T09:21:26.000Z | 2021-12-15T09:21:26.000Z | """
Train Abnormal trajectory detection with deep autoencoder.
"""
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import ae_utilities as aeu
import input_data as data
import abnormal_data_generation as adg
import dataset_defines as dd
import numpy as np
import os
abspath = os.path.abspath(__file__)
dir_name = os.path.dirname(abspath)
dataset_name = dir_name[dir_name.rfind('/')+1:] + '_gt_data.csv'
dataset_file_path = os.path.join(dir_name + '/data', dataset_name)
abnormal_name = dir_name[dir_name.rfind('/')+1:] + '_gt_real_abnormal_2.csv'
abnormal_file_path = os.path.join(dir_name + '/data', abnormal_name)
# Extract trajectories and export data to array
dataset = np.genfromtxt(dataset_file_path, delimiter=',')
# Ignore first column representing object_id
dataset = dataset[:,1:]
# Generate abnormal data
abnormal_data = np.genfromtxt(abnormal_file_path, delimiter=',')
abnormal_data = abnormal_data[:,1:]
# Best layer type tested in main_test.py
# ref.: https://deeplearning4j.org/deepautoencoder
best_layer_type = (128,64,32,16,8)
# Files setup
test_score_filename = 'results/new_method_v4_5/test_scores.csv'
summary_results_filename = test_score_filename[:test_score_filename.rfind('/')] + '/summary_results.csv'
global_summary_filename = test_score_filename[:test_score_filename.rfind('/')] + '/global_summary.log'
model_files_dir_name = 'model/new_method_v4_5/'
data.make_dir_if_new(test_score_filename)
data.make_dir_if_new(model_files_dir_name)
n_acc_list = []
v_acc_list = []
t_acc_list = []
ae_n_acc_list = []
ae_v_acc_list = []
ae_t_acc_list = []
for i in range(aeu.repeat_number):
print('======================== Iteration {} ========================'.format(i))
# Shuffle the data by row only
# and get the seed in order to reproduce the random sequence
train_data, validation_data, random_shuffle_seed = data.split_dataset_uniformly(dataset)
# The trained model will be saved
saved_ae_network_path = os.path.join(data.dir_name, model_files_dir_name)
mv4 = aeu.BuildOurMethodV4(original_dim=train_data.shape[1],
hidden_units=best_layer_type,
model_dir_path=saved_ae_network_path,
iteration_number=i)
mv4.train(train_data=train_data,
save_model=True,
print_and_plot_history=True,
show_plots=False)
n_loss, n_acc, ae_n_mse, ae_n_mses = mv4.test_model(test_data=train_data, test_ae=True)
v_loss, v_acc, ae_v_mse, ae_v_mses = mv4.test_model(test_data=validation_data, test_ae=True)
t_loss, t_acc, ae_t_mse, ae_t_mses = mv4.test_model(test_data=abnormal_data, is_abnormal=True, test_ae=True)
output_string = 'Iteration {} with layer type {}: n_loss = {}; v_loss = {}; t_loss = {}'\
.format(i, best_layer_type, n_loss, v_loss, t_loss)
print('\n')
# Save the result to a global summary file
output_string += '\n'
# Compute the threshold value for the autoencoder method. Used for comparison purposes.
ae_threshold = ae_n_mse + ae_v_mse + 3 * (np.std(ae_n_mses) + np.std(ae_v_mses))
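# Illustrative numbers: with ae_n_mse=0.10, ae_v_mse=0.12 and standard
# deviations 0.02 and 0.03, the threshold is 0.22 + 3 * 0.05 = 0.37.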
# Compute the accuracy of the old method: the autoencoder alone with the computed threshold
ae_n_acc = sum([score < ae_threshold for score in ae_n_mses])/float(len(ae_n_mses))
ae_v_acc = sum([score < ae_threshold for score in ae_v_mses])/float(len(ae_v_mses))
ae_t_acc = sum([score > ae_threshold for score in ae_t_mses])/float(len(ae_t_mses))
# Summary file format: [Iteration, ae_train_score, ae_validate_score, threshold_value,
# normal_train_ratio, normal_valid_ratio, abnormal_ratio]
if i == 0:
with open(os.path.join(data.dir_name, summary_results_filename), 'wb') as summary_file:
summary_file.write(b'iteration,random_shuffle_seed,ae_threshold,ae_n_acc,ae_v_acc,ae_t_acc,'
b'n_loss,n_acc,v_loss,v_acc,t_loss,t_acc\n')
n_acc_list.append(n_acc*100.0)
v_acc_list.append(v_acc*100.0)
t_acc_list.append(t_acc*100.0)
ae_n_acc_list.append(ae_n_acc*100.0)
ae_v_acc_list.append(ae_v_acc*100.0)
ae_t_acc_list.append(ae_t_acc*100.0)
with open(os.path.join(data.dir_name, summary_results_filename), 'ab') as summary_file:
np.savetxt(summary_file, np.array([i,random_shuffle_seed,ae_threshold,ae_n_acc,ae_v_acc,ae_t_acc,
n_loss,n_acc,v_loss,v_acc,t_loss,t_acc]).reshape(1, -1),delimiter=',')
output_string += '{:.2f}% (old: {:.2f}%) of normal train samples are detected as normal.\n'.format(n_acc*100.0,
ae_n_acc*100.0)
output_string += '{:.2f}% (old: {:.2f}%) of normal valid samples are detected as normal.\n'.format(v_acc*100.0,
ae_v_acc*100.0)
output_string += '{:.2f}% (old: {:.2f}%) of abnormal samples are detected as abnormal.\n'.format(t_acc*100.0,
ae_t_acc*100.0)
print(output_string)
print('==============================================================')
# Global summary
global_summary_file = open(global_summary_filename, 'w')
output_string = 'Global summary of abnormal trajectory detection with our new method v.4\n'
output_string += '-----------------------------------------------------------------------\n'
output_string += 'On average, using layer type {},\n'.format(best_layer_type)
output_string += '\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\n'.format(
np.mean(n_acc_list), np.mean(ae_n_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\n'.format(
np.mean(v_acc_list), np.mean(ae_v_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\n'.format(
np.mean(t_acc_list), np.mean(ae_t_acc_list))
output_string += '-----------------------------------------------------------------------\n'
output_string += 'On maximum, using layer type {},\n'.format(best_layer_type)
output_string += '\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\n'.format(
np.max(n_acc_list), np.max(ae_n_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\n'.format(
np.max(v_acc_list), np.max(ae_v_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\n'.format(
np.max(t_acc_list), np.max(ae_t_acc_list))
output_string += '-----------------------------------------------------------------------\n'
output_string += 'On minimum, using layer type {},\n'.format(best_layer_type)
output_string += '\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\n'.format(
np.min(n_acc_list), np.min(ae_n_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\n'.format(
np.min(v_acc_list), np.min(ae_v_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\n'.format(
np.min(t_acc_list), np.min(ae_t_acc_list))
output_string += '-----------------------------------------------------------------------\n'
global_summary_file.write(output_string)
print(output_string)
global_summary_file.close()
| 49.444444 | 118 | 0.645605 | import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import ae_utilities as aeu
import input_data as data
import abnormal_data_generation as adg
import dataset_defines as dd
import numpy as np
import os
abspath = os.path.abspath(__file__)
dir_name = os.path.dirname(abspath)
dataset_name = dir_name[dir_name.rfind('/')+1:] + '_gt_data.csv'
dataset_file_path = os.path.join(dir_name + '/data', dataset_name)
abnormal_name = dir_name[dir_name.rfind('/')+1:] + '_gt_real_abnormal_2.csv'
abnormal_file_path = os.path.join(dir_name + '/data', abnormal_name)
dataset = np.genfromtxt(dataset_file_path, delimiter=',')
dataset = dataset[:,1:]
abnormal_data = np.genfromtxt(abnormal_file_path, delimiter=',')
abnormal_data = abnormal_data[:,1:]
best_layer_type = (128,64,32,16,8)
test_score_filename = 'results/new_method_v4_5/test_scores.csv'
summary_results_filename = test_score_filename[:test_score_filename.rfind('/')] + '/summary_results.csv'
global_summary_filename = test_score_filename[:test_score_filename.rfind('/')] + '/global_summary.log'
model_files_dir_name = 'model/new_method_v4_5/'
data.make_dir_if_new(test_score_filename)
data.make_dir_if_new(model_files_dir_name)
n_acc_list = []
v_acc_list = []
t_acc_list = []
ae_n_acc_list = []
ae_v_acc_list = []
ae_t_acc_list = []
for i in range(aeu.repeat_number):
print('======================== Iteration {} ========================'.format(i))
train_data, validation_data, random_shuffle_seed = data.split_dataset_uniformly(dataset)
saved_ae_network_path = os.path.join(data.dir_name, model_files_dir_name)
mv4 = aeu.BuildOurMethodV4(original_dim=train_data.shape[1],
hidden_units=best_layer_type,
model_dir_path=saved_ae_network_path,
iteration_number=i)
mv4.train(train_data=train_data,
save_model=True,
print_and_plot_history=True,
show_plots=False)
n_loss, n_acc, ae_n_mse, ae_n_mses = mv4.test_model(test_data=train_data, test_ae=True)
v_loss, v_acc, ae_v_mse, ae_v_mses = mv4.test_model(test_data=validation_data, test_ae=True)
t_loss, t_acc, ae_t_mse, ae_t_mses = mv4.test_model(test_data=abnormal_data, is_abnormal=True, test_ae=True)
output_string = 'Iteration {} with layer type {}: n_loss = {}; v_loss = {}; t_loss = {}'\
.format(i, best_layer_type, n_loss, v_loss, t_loss)
print('\n')
output_string += '\n'
ae_threshold = ae_n_mse + ae_v_mse + 3 * (np.std(ae_n_mses) + np.std(ae_v_mses))
ae_n_acc = sum([score < ae_threshold for score in ae_n_mses])/float(len(ae_n_mses))
ae_v_acc = sum([score < ae_threshold for score in ae_v_mses])/float(len(ae_v_mses))
ae_t_acc = sum([score > ae_threshold for score in ae_t_mses])/float(len(ae_t_mses))
if i == 0:
with open(os.path.join(data.dir_name, summary_results_filename), 'wb') as summary_file:
summary_file.write(b'iteration,random_shuffle_seed,ae_threshold,ae_n_acc,ae_v_acc,ae_t_acc,'
b'n_loss,n_acc,v_loss,v_acc,t_loss,t_acc\n')
n_acc_list.append(n_acc*100.0)
v_acc_list.append(v_acc*100.0)
t_acc_list.append(t_acc*100.0)
ae_n_acc_list.append(ae_n_acc*100.0)
ae_v_acc_list.append(ae_v_acc*100.0)
ae_t_acc_list.append(ae_t_acc*100.0)
with open(os.path.join(data.dir_name, summary_results_filename), 'ab') as summary_file:
np.savetxt(summary_file, np.array([i,random_shuffle_seed,ae_threshold,ae_n_acc,ae_v_acc,ae_t_acc,
n_loss,n_acc,v_loss,v_acc,t_loss,t_acc]).reshape(1, -1),delimiter=',')
output_string += '{:.2f}% (old: {:.2f}%) of normal train samples are detected as normal.\n'.format(n_acc*100.0,
ae_n_acc*100.0)
output_string += '{:.2f}% (old: {:.2f}%) of normal valid samples are detected as normal.\n'.format(v_acc*100.0,
ae_v_acc*100.0)
output_string += '{:.2f}% (old: {:.2f}%) of abnormal samples are detected as abnormal.\n'.format(t_acc*100.0,
ae_t_acc*100.0)
print(output_string)
print('==============================================================')
global_summary_file = open(global_summary_filename, 'w')
output_string = 'Global summary of abnormal trajectory detection with our new method v.4\n'
output_string += '-----------------------------------------------------------------------\n'
output_string += 'On average, using layer type {},\n'.format(best_layer_type)
output_string += '\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\n'.format(
np.mean(n_acc_list), np.mean(ae_n_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\n'.format(
np.mean(v_acc_list), np.mean(ae_v_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\n'.format(
np.mean(t_acc_list), np.mean(ae_t_acc_list))
output_string += '-----------------------------------------------------------------------\n'
output_string += 'On maximum, using layer type {},\n'.format(best_layer_type)
output_string += '\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\n'.format(
np.max(n_acc_list), np.max(ae_n_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\n'.format(
np.max(v_acc_list), np.max(ae_v_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\n'.format(
np.max(t_acc_list), np.max(ae_t_acc_list))
output_string += '-----------------------------------------------------------------------\n'
output_string += 'On minimum, using layer type {},\n'.format(best_layer_type)
output_string += '\t{:.2f}% (old:{:.2f}%) of normal training samples are detected as normal;\n'.format(
np.min(n_acc_list), np.min(ae_n_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of normal validation samples are detected as normal;\n'.format(
np.min(v_acc_list), np.min(ae_v_acc_list))
output_string += '\t{:.2f}% (old:{:.2f}%) of abnormal samples are detected as abnormal.\n'.format(
np.min(t_acc_list), np.min(ae_t_acc_list))
output_string += '-----------------------------------------------------------------------\n'
global_summary_file.write(output_string)
print(output_string)
global_summary_file.close()
| true | true |
f7fdb3971f8b15e5eafc6e529f1129f69771664e | 1,506 | py | Python | com/shbak/effective_python/_01_example/_74_memoryview_bytearray/main.py | sanghyunbak/effective_python | e35d880c47e988607e4a11aa6eb6b62ae887688a | [
"Apache-2.0"
] | null | null | null | com/shbak/effective_python/_01_example/_74_memoryview_bytearray/main.py | sanghyunbak/effective_python | e35d880c47e988607e4a11aa6eb6b62ae887688a | [
"Apache-2.0"
] | null | null | null | com/shbak/effective_python/_01_example/_74_memoryview_bytearray/main.py | sanghyunbak/effective_python | e35d880c47e988607e4a11aa6eb6b62ae887688a | [
"Apache-2.0"
] | null | null | null | import timeit
from socket import socket
from termcolor import colored
def print_memoryview():
data = b'shave and a haircut, two bits'
view = memoryview(data)
chunk = view[12:19]
print(colored(f'chunk: {chunk}', 'green'))
print(colored(f'size: {chunk.nbytes}', 'green'))
print(colored(f'view data: {chunk.tobytes()}', 'green'))
print(colored(f'inside data: {chunk.obj}', 'green'))
def bytes_and_bytearray_test():
my_bytes = b'hello'
try:
my_bytes[0] = '\x79'
except Exception as e:
print(colored(f'Exception occur: {e}', 'red'))
my_array = bytearray('hello '.encode('utf8'))
my_array[0] = 0x79
print(colored(f'my_array: {my_array}', 'green'))
my_array = bytearray('row, row, row your boat'.encode('utf8'))
my_view = memoryview(my_array)
write_view = my_view[3:13]
write_view[:] = b'-10 bytes-'
print(my_array)
def run_test():
my_array = bytearray('row, row, row your boat'.encode('utf8'))
my_view = memoryview(my_array)
write_view = my_view[3:13]
write_view[:] = b'-10 bytes-'
byte_offset = 10
size = len(write_view)
chunk = write_view[byte_offset:byte_offset + size]
# socket.recv_info(chunk)
def benchmark():
result = timeit.timeit(
stmt='run_test()',
globals=globals(),
number=100
) / 100
print(colored(f'{result:0.9f} sec', 'magenta'))
if __name__ == '__main__':
print_memoryview()
bytes_and_bytearray_test()
benchmark()
| 25.1 | 66 | 0.632802 | import timeit
from socket import socket
from termcolor import colored
def print_memoryview():
data = b'shave and a haircut, two bits'
view = memoryview(data)
chunk = view[12:19]
print(colored(f'chunk: {chunk}', 'green'))
print(colored(f'size: {chunk.nbytes}', 'green'))
print(colored(f'view data: {chunk.tobytes()}', 'green'))
print(colored(f'inside data: {chunk.obj}', 'green'))
def bytes_and_bytearray_test():
my_bytes = b'hello'
try:
my_bytes[0] = '\x79'
except Exception as e:
print(colored(f'Exception occur: {e}', 'red'))
my_array = bytearray('hello '.encode('utf8'))
my_array[0] = 0x79
print(colored(f'my_array: {my_array}', 'green'))
my_array = bytearray('row, row, row your boat'.encode('utf8'))
my_view = memoryview(my_array)
write_view = my_view[3:13]
write_view[:] = b'-10 bytes-'
print(my_array)
def run_test():
my_array = bytearray('row, row, row your boat'.encode('utf8'))
my_view = memoryview(my_array)
write_view = my_view[3:13]
write_view[:] = b'-10 bytes-'
byte_offset = 10
size = len(write_view)
chunk = write_view[byte_offset:byte_offset + size]
def benchmark():
result = timeit.timeit(
stmt='run_test()',
globals=globals(),
number=100
) / 100
print(colored(f'{result:0.9f} sec', 'magenta'))
if __name__ == '__main__':
print_memoryview()
bytes_and_bytearray_test()
benchmark()
| true | true |
f7fdb428b910bb6d6e131170f6089015f3491cb8 | 1,400 | py | Python | memoize/decorator.py | ECrownofFire/chaos | 0cfbb85ab52654967909aef54eff3a0e62b641bd | [
"MIT"
] | 1,804 | 2017-05-23T02:34:27.000Z | 2017-05-26T00:44:44.000Z | memoize/decorator.py | ECrownofFire/chaos | 0cfbb85ab52654967909aef54eff3a0e62b641bd | [
"MIT"
] | 345 | 2017-05-20T23:55:12.000Z | 2017-06-19T07:48:58.000Z | memoize/decorator.py | ECrownofFire/chaos | 0cfbb85ab52654967909aef54eff3a0e62b641bd | [
"MIT"
] | 248 | 2017-05-23T02:00:07.000Z | 2017-05-26T00:00:28.000Z | from functools import wraps
import time
import inspect
from . import helpers
def memoize(ttl_spec, whitelist=None, blacklist=None,
key_fn=helpers._json_keyify, backend=lambda fn: dict(),
get_now=time.time):
""" memoize/cache the decorated function for ttl amount of time """
ttl = helpers._time_code_to_seconds(ttl_spec)
def wrapper(fn):
sig = inspect.getfullargspec(fn)
cache = backend(fn)
@wraps(fn)
def wrapper2(*args, **kwargs):
# extract the arg names and values to use in our memoize key
to_use = helpers._extract_args(sig.args, sig.defaults, args, kwargs,
whitelist, blacklist)
# and construct our memoize key
key = key_fn(to_use)
now = get_now()
needs_refresh = True
# we have a cached value already, let's check if it's old and needs
# to be refreshed
if key in cache:
inserted, res = cache[key]
needs_refresh = now - inserted > ttl
# if it's old, re-call the decorated function and re-cache the
# result with a new timestamp
if needs_refresh:
res = fn(*args, **kwargs)
cache[key] = (now, res)
return res
return wrapper2
return wrapper
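# Usage sketch (the exact ttl_spec format depends on
# helpers._time_code_to_seconds, which is not shown; a plain number of
# seconds is assumed, and blacklist presumably excludes args from the key):
# @memoize(60, blacklist=['verbose'])
# def fetch(user_id, verbose=False):
#     ...
# fetch(1)   # computed, then cached under the key built from the kept args
# fetch(1)   # served from the cache until the 60s ttl elapses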
| 29.787234 | 80 | 0.566429 | from functools import wraps
import time
import inspect
from . import helpers
def memoize(ttl_spec, whitelist=None, blacklist=None,
key_fn=helpers._json_keyify, backend=lambda fn: dict(),
get_now=time.time):
ttl = helpers._time_code_to_seconds(ttl_spec)
def wrapper(fn):
sig = inspect.getfullargspec(fn)
cache = backend(fn)
@wraps(fn)
def wrapper2(*args, **kwargs):
to_use = helpers._extract_args(sig.args, sig.defaults, args, kwargs,
whitelist, blacklist)
key = key_fn(to_use)
now = get_now()
needs_refresh = True
if key in cache:
inserted, res = cache[key]
needs_refresh = now - inserted > ttl
# result with a new timestamp
if needs_refresh:
res = fn(*args, **kwargs)
cache[key] = (now, res)
return res
return wrapper2
return wrapper
| true | true |
f7fdb5778147d1140e0a87c95c5b25a36bd7de1a | 4,249 | py | Python | pi_turret/turret/turret.py | iot-foam-turret/pi-turret | 583d72a7625eb702ccde7afe66a085162854555a | [
"MIT"
] | 2 | 2021-05-12T21:13:14.000Z | 2022-03-27T15:24:08.000Z | pi_turret/turret/turret.py | iot-foam-turret/pi-turret | 583d72a7625eb702ccde7afe66a085162854555a | [
"MIT"
] | 7 | 2019-09-14T15:21:46.000Z | 2019-09-29T02:58:46.000Z | pi_turret/turret/turret.py | iot-foam-turret/pi-turret | 583d72a7625eb702ccde7afe66a085162854555a | [
"MIT"
] | null | null | null | """
Main Blaster Turret
"""
import time
import threading
from typing import Callable
import pi_turret.config as config
from pi_turret.blaster.hyperfire import Hyperfire
from pi_turret.stepper_motor.stepper import StepperMotor, STEP_DEGREES
from pi_turret.stepper_motor.stepper_slot import StepperMotorSlot
from pi_turret.sensor.button import yaw_button, pitch_button
from pi_turret.turret.mode import Mode
class Turret:
"""
Turret that can move left/right and up/down
"""
darts_per_second = 8
def __init__(self):
self.blaster = Hyperfire()
self.pitch_motor = StepperMotor(StepperMotorSlot.STEPPER_TWO)
self.yaw_motor = StepperMotor(StepperMotorSlot.STEPPER_ONE)
self.pitch = 0.0
self.yaw = 0.0
self.ammo = 22
self.mode = Mode.waiting
def calibrate(self):
"""
Calibrate the position of the stepper motors
"""
self.mode = Mode.calibrating
yaw_sensor = yaw_button()
while not yaw_sensor.is_pressed():
self.move_left()
for _ in range(75):
self.move_right()
pitch_sensor = pitch_button()
while not pitch_sensor.is_pressed():
self.move_up()
for _ in range(21):
self.move_down()
self.pitch = 0.0
self.yaw = 0.0
self.mode = Mode.waiting
def burst_fire(self, duration: float, completion: Callable = None):
"""
Fire the blaster for `duration` seconds on a background thread, update the ammo count, then run `completion` if given.
"""
if self.mode == Mode.firing:
return
self.mode = Mode.firing
def fire():
self.blaster.burst_fire(duration=duration)
self.mode = Mode.waiting
self.ammo = self.ammo - round(self.darts_per_second * duration)
self.ammo = self.ammo if self.ammo > 0 else 0
if completion is not None:
completion()
burst_fire_thread = threading.Thread(target=fire, daemon=True)
burst_fire_thread.start()
def move(self, pitch: float, yaw: float):
"""
Move the turret to the given pitch and yaw
"""
move_pitch = pitch - self.pitch
move_yaw = yaw - self.yaw
while move_pitch != 0 or move_yaw != 0:
if (move_pitch > 0 and move_pitch < (STEP_DEGREES / 2)) or (move_pitch < 0 and -move_pitch < (STEP_DEGREES / 2)):
move_pitch = 0
if self.pitch_stop():
move_pitch = 0
if move_pitch > 0:
self.pitch_motor.one_step_backwards()
self.pitch += STEP_DEGREES
move_pitch -= STEP_DEGREES
elif move_pitch < 0:
self.pitch_motor.one_step_forward()
self.pitch -= STEP_DEGREES
move_pitch += STEP_DEGREES
if (move_yaw > 0 and move_yaw < (STEP_DEGREES / 2)) or (move_yaw < 0 and -move_yaw < (STEP_DEGREES / 2)):
move_yaw = 0
if self.yaw_stop():
move_yaw = 0
if move_yaw > 0:
self.yaw_motor.one_step_forward()
self.yaw += STEP_DEGREES
move_yaw -= STEP_DEGREES
elif move_yaw < 0:
self.yaw_motor.one_step_backwards()
self.yaw -= STEP_DEGREES
move_yaw += STEP_DEGREES
time.sleep(0.02)
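# Worked example, assuming STEP_DEGREES = 1.8 (a common stepper resolution;
# the real value lives in stepper.py): from pitch 0.0, move(3.0, 0.0) steps
# to 1.8, then 3.6; the remainder -0.6 is within half a step and is zeroed,
# so the turret settles at 3.6 degrees, with 0.02s between iterations.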
def yaw_stop(self):
"""
Return true if the yaw position is beyond the functional bound.
"""
return self.yaw > config.YAW_MAX or self.yaw < -config.YAW_MAX
def pitch_stop(self):
"""
Return true if the pitch position is beyond the functional bound.
"""
return self.pitch > config.PITCH_MAX or self.pitch < -config.PITCH_MAX
def move_up(self):
"""Move up one step
"""
self.pitch_motor.step_backward()
def move_down(self):
"""Move down one step
"""
self.pitch_motor.step_forward()
def move_left(self):
"""Move left one step
"""
self.yaw_motor.step_backward()
def move_right(self):
"""Move right one step
"""
self.yaw_motor.step_forward()
if __name__ == "__main__":
TURRET = Turret()
TURRET.calibrate()
while True:
pass
| 30.35 | 125 | 0.581549 | import time
import threading
from typing import Callable
import pi_turret.config as config
from pi_turret.blaster.hyperfire import Hyperfire
from pi_turret.stepper_motor.stepper import StepperMotor, STEP_DEGREES
from pi_turret.stepper_motor.stepper_slot import StepperMotorSlot
from pi_turret.sensor.button import yaw_button, pitch_button
from pi_turret.turret.mode import Mode
class Turret:
darts_per_second = 8
def __init__(self):
self.blaster = Hyperfire()
self.pitch_motor = StepperMotor(StepperMotorSlot.STEPPER_TWO)
self.yaw_motor = StepperMotor(StepperMotorSlot.STEPPER_ONE)
self.pitch = 0.0
self.yaw = 0.0
self.ammo = 22
self.mode = Mode.waiting
def calibrate(self):
self.mode = Mode.calibrating
yaw_sensor = yaw_button()
while not yaw_sensor.is_pressed():
self.move_left()
for _ in range(75):
self.move_right()
pitch_sensor = pitch_button()
while not pitch_sensor.is_pressed():
self.move_up()
for _ in range(21):
self.move_down()
self.pitch = 0.0
self.yaw = 0.0
self.mode = Mode.waiting
def burst_fire(self, duration: float, completion: Callable = None):
if self.mode == Mode.firing:
return
self.mode = Mode.firing
def fire():
self.blaster.burst_fire(duration=duration)
self.mode = Mode.waiting
self.ammo = self.ammo - round(self.darts_per_second * duration)
self.ammo = self.ammo if self.ammo > 0 else 0
if completion is not None:
completion()
burst_fire_thread = threading.Thread(target=fire, daemon=True)
burst_fire_thread.start()
def move(self, pitch: float, yaw: float):
move_pitch = pitch - self.pitch
move_yaw = yaw - self.yaw
while move_pitch != 0 or move_yaw != 0:
if (move_pitch > 0 and move_pitch < (STEP_DEGREES / 2)) or (move_pitch < 0 and -move_pitch < (STEP_DEGREES / 2)):
move_pitch = 0
if self.pitch_stop():
move_pitch = 0
if move_pitch > 0:
self.pitch_motor.one_step_backwards()
self.pitch += STEP_DEGREES
move_pitch -= STEP_DEGREES
elif move_pitch < 0:
self.pitch_motor.one_step_forward()
self.pitch -= STEP_DEGREES
move_pitch += STEP_DEGREES
if (move_yaw > 0 and move_yaw < (STEP_DEGREES / 2)) or (move_yaw < 0 and -move_yaw < (STEP_DEGREES / 2)):
move_yaw = 0
if self.yaw_stop():
move_yaw = 0
if move_yaw > 0:
self.yaw_motor.one_step_forward()
self.yaw += STEP_DEGREES
move_yaw -= STEP_DEGREES
elif move_yaw < 0:
self.yaw_motor.one_step_backwards()
self.yaw -= STEP_DEGREES
move_yaw += STEP_DEGREES
time.sleep(0.02)
def yaw_stop(self):
return self.yaw > config.YAW_MAX or self.yaw < -config.YAW_MAX
def pitch_stop(self):
return self.pitch > config.PITCH_MAX or self.pitch < -config.PITCH_MAX
def move_up(self):
self.pitch_motor.step_backward()
def move_down(self):
self.pitch_motor.step_forward()
def move_left(self):
self.yaw_motor.step_backward()
def move_right(self):
self.yaw_motor.step_forward()
if __name__ == "__main__":
TURRET = Turret()
TURRET.calibrate()
while True:
pass
| true | true |
f7fdb724517a005f9585d71e40aee91e0ba66b97 | 9,947 | py | Python | qa/rpc-tests/cfund-paymentrequest-state-accept.py | lordhace/navcoin-core | 50150863785850447582ce0d8b80538b273e2fde | [
"MIT"
] | 1 | 2020-07-31T19:43:05.000Z | 2020-07-31T19:43:05.000Z | qa/rpc-tests/cfund-paymentrequest-state-accept.py | Alonewolf-123/navcoin-core | e0879195bdf5373e5b1fcf4e970af0a8815660f6 | [
"MIT"
] | 1 | 2019-01-16T23:33:01.000Z | 2019-06-29T13:02:54.000Z | qa/rpc-tests/cfund-paymentrequest-state-accept.py | Alonewolf-123/navcoin-core | e0879195bdf5373e5b1fcf4e970af0a8815660f6 | [
"MIT"
] | 1 | 2019-07-16T09:13:48.000Z | 2019-07-16T09:13:48.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class CommunityFundPaymentRequestsTest(NavCoinTestFramework):
"""Tests the payment request procedures of the Community fund."""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = self.setup_nodes()
self.is_network_split = split
def run_test(self):
self.nodes[0].staking(False)
activate_cfund(self.nodes[0])
self.nodes[0].donatefund(100)
# Create a proposal and accept it by voting
proposalid0 = self.nodes[0].createproposal(self.nodes[0].getnewaddress(), 10, 3600, "test")["hash"]
locked_before = self.nodes[0].cfundstats()["funds"]["locked"]
end_cycle(self.nodes[0])
time.sleep(0.2)
self.nodes[0].proposalvote(proposalid0, "yes")
slow_gen(self.nodes[0], 1)
end_cycle(self.nodes[0])
locked_accepted = self.nodes[0].cfundstats()["funds"]["locked"]
time.sleep(0.2)
# Proposal should be accepted
assert(self.nodes[0].getproposal(proposalid0)["state"] == 1)
assert(self.nodes[0].getproposal(proposalid0)["status"] == "accepted")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == float(locked_before) + float(self.nodes[0].getproposal(proposalid0)["requestedAmount"]))
# Create a payment request
paymentrequestid0 = self.nodes[0].createpaymentrequest(proposalid0, 1, "test0")["hash"]
slow_gen(self.nodes[0], 1)
# Payment request initial state at beginning of cycle
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
# Vote enough yes votes, without enough quorum
total_votes = self.nodes[0].cfundstats()["consensus"]["minSumVotesPerVotingCycle"]
min_yes_votes = self.nodes[0].cfundstats()["consensus"]["votesAcceptPaymentRequestPercentage"]/100
yes_votes = int(total_votes * min_yes_votes) + 1
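# Illustrative numbers: if minSumVotesPerVotingCycle were 100 and the
# accept percentage 50, then min_yes_votes = 0.5 and
# yes_votes = int(100 * 0.5) + 1 = 51 -- a majority, but cast on exactly
# the minimum vote count, which the later cases exceed with total_votes
# = minSum + 1 to satisfy quorum.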
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "no")
slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
# Should still be in pending
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
end_cycle(self.nodes[0])
time.sleep(0.2)
# Payment request initial state at beginning of cycle
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
# Vote enough quorum, but not enough positive votes
total_votes = self.nodes[0].cfundstats()["consensus"]["minSumVotesPerVotingCycle"] + 1
yes_votes = int(total_votes * min_yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "no")
slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
end_cycle(self.nodes[0])
time.sleep(0.2)
# Payment request initial state at beginning of cycle
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
# Vote enough quorum and enough positive votes
total_votes = self.nodes[0].cfundstats()["consensus"]["minSumVotesPerVotingCycle"] + 1
yes_votes = int(total_votes * min_yes_votes) + 1
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "no")
blocks = slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "accepted waiting for end of voting period")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
time.sleep(0.2)
# Revert last vote and check status
self.nodes[0].invalidateblock(blocks[-1])
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
self.nodes[0].cfundstats()
# Vote again
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], 1)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
# Move to a new cycle...
time.sleep(0.2)
end_cycle(self.nodes[0])
blocks = slow_gen(self.nodes[0], 1)
locked_after_payment = float(locked_accepted) - float(self.nodes[0].getpaymentrequest(paymentrequestid0)["requestedAmount"])
# Paymentrequest must be accepted now
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "accepted")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_after_payment)
# Check that paymentrequest remains in accepted state after the max number of cycles
cycles_to_expire = self.nodes[0].cfundstats()["consensus"]["maxCountVotingCyclePaymentRequests"]
for idx in range(cycles_to_expire):
end_cycle(self.nodes[0])
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "accepted")
# Create multiple payment requests
paymentrequestid1 = self.nodes[0].createpaymentrequest(proposalid0, 4, "test1")["hash"]
paymentrequestid2 = self.nodes[0].createpaymentrequest(proposalid0, 4, "test2")["hash"]
slow_gen(self.nodes[0], 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["status"] == "pending")
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["status"] == "pending")
self.nodes[0].paymentrequestvote(paymentrequestid1, "yes")
self.nodes[0].paymentrequestvote(paymentrequestid2, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid1, "no")
self.nodes[0].paymentrequestvote(paymentrequestid2, "no")
blocks = slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid1, "remove")
self.nodes[0].paymentrequestvote(paymentrequestid2, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["status"] == "accepted waiting for end of voting period")
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["status"] == "accepted waiting for end of voting period")
time.sleep(0.2)
end_cycle(self.nodes[0])
blocks = slow_gen(self.nodes[0], 1)
# Check status after acceptance
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["status"] == "accepted")
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["status"] == "accepted")
locked_after_2nd_payment = (locked_after_payment -
float(self.nodes[0].getpaymentrequest(paymentrequestid1)["requestedAmount"]) -
float(self.nodes[0].getpaymentrequest(paymentrequestid2)["requestedAmount"]))
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_after_2nd_payment)
# Creating a payment request for more than the total proposal amount must throw "JSONRPC error: Invalid amount."
paymentrequest_not_created = True
try:
paymentrequestid3 = self.nodes[0].createpaymentrequest(proposalid0, 2, "test3")["hash"]
paymentrequest_not_created = False
except JSONRPCException:
pass
assert(paymentrequest_not_created)
if __name__ == '__main__':
CommunityFundPaymentRequestsTest().main()
| 45.420091 | 152 | 0.674977 |
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class CommunityFundPaymentRequestsTest(NavCoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = self.setup_nodes()
self.is_network_split = split
def run_test(self):
self.nodes[0].staking(False)
activate_cfund(self.nodes[0])
self.nodes[0].donatefund(100)
proposalid0 = self.nodes[0].createproposal(self.nodes[0].getnewaddress(), 10, 3600, "test")["hash"]
locked_before = self.nodes[0].cfundstats()["funds"]["locked"]
end_cycle(self.nodes[0])
time.sleep(0.2)
self.nodes[0].proposalvote(proposalid0, "yes")
slow_gen(self.nodes[0], 1)
end_cycle(self.nodes[0])
locked_accepted = self.nodes[0].cfundstats()["funds"]["locked"]
time.sleep(0.2)
assert(self.nodes[0].getproposal(proposalid0)["state"] == 1)
assert(self.nodes[0].getproposal(proposalid0)["status"] == "accepted")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == float(locked_before) + float(self.nodes[0].getproposal(proposalid0)["requestedAmount"]))
paymentrequestid0 = self.nodes[0].createpaymentrequest(proposalid0, 1, "test0")["hash"]
slow_gen(self.nodes[0], 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
total_votes = self.nodes[0].cfundstats()["consensus"]["minSumVotesPerVotingCycle"]
min_yes_votes = self.nodes[0].cfundstats()["consensus"]["votesAcceptPaymentRequestPercentage"]/100
yes_votes = int(total_votes * min_yes_votes) + 1
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "no")
slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
end_cycle(self.nodes[0])
time.sleep(0.2)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
total_votes = self.nodes[0].cfundstats()["consensus"]["minSumVotesPerVotingCycle"] + 1
yes_votes = int(total_votes * min_yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "no")
slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
end_cycle(self.nodes[0])
time.sleep(0.2)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
total_votes = self.nodes[0].cfundstats()["consensus"]["minSumVotesPerVotingCycle"] + 1
yes_votes = int(total_votes * min_yes_votes) + 1
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "no")
blocks = slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "accepted waiting for end of voting period")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
time.sleep(0.2)
self.nodes[0].invalidateblock(blocks[-1])
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "pending")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_accepted)
self.nodes[0].cfundstats()
self.nodes[0].paymentrequestvote(paymentrequestid0, "yes")
slow_gen(self.nodes[0], 1)
self.nodes[0].paymentrequestvote(paymentrequestid0, "remove")
time.sleep(0.2)
end_cycle(self.nodes[0])
blocks = slow_gen(self.nodes[0], 1)
locked_after_payment = float(locked_accepted) - float(self.nodes[0].getpaymentrequest(paymentrequestid0)["requestedAmount"])
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "accepted")
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_after_payment)
cycles_to_expire = self.nodes[0].cfundstats()["consensus"]["maxCountVotingCyclePaymentRequests"]
for idx in range(cycles_to_expire):
end_cycle(self.nodes[0])
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid0)["status"] == "accepted")
paymentrequestid1 = self.nodes[0].createpaymentrequest(proposalid0, 4, "test1")["hash"]
paymentrequestid2 = self.nodes[0].createpaymentrequest(proposalid0, 4, "test2")["hash"]
slow_gen(self.nodes[0], 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["status"] == "pending")
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["status"] == "pending")
self.nodes[0].paymentrequestvote(paymentrequestid1, "yes")
self.nodes[0].paymentrequestvote(paymentrequestid2, "yes")
slow_gen(self.nodes[0], yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid1, "no")
self.nodes[0].paymentrequestvote(paymentrequestid2, "no")
blocks = slow_gen(self.nodes[0], total_votes - yes_votes)
self.nodes[0].paymentrequestvote(paymentrequestid1, "remove")
self.nodes[0].paymentrequestvote(paymentrequestid2, "remove")
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["status"] == "accepted waiting for end of voting period")
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["state"] == 0)
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["status"] == "accepted waiting for end of voting period")
time.sleep(0.2)
end_cycle(self.nodes[0])
blocks = slow_gen(self.nodes[0], 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid1)["status"] == "accepted")
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["state"] == 1)
assert(self.nodes[0].getpaymentrequest(paymentrequestid2)["status"] == "accepted")
locked_after_2nd_payment = (locked_after_payment -
float(self.nodes[0].getpaymentrequest(paymentrequestid1)["requestedAmount"]) -
float(self.nodes[0].getpaymentrequest(paymentrequestid2)["requestedAmount"]))
assert(self.nodes[0].cfundstats()["funds"]["locked"] == locked_after_2nd_payment)
paymentrequest_not_created = True
try:
paymentrequestid3 = self.nodes[0].createpaymentrequest(proposalid0, 2, "test3")["hash"]
paymentrequest_not_created = False
except JSONRPCException:
pass
assert(paymentrequest_not_created)
if __name__ == '__main__':
CommunityFundPaymentRequestsTest().main()
| true | true |
f7fdb7a417cb8178de8a72c418a00fea091d0e4e | 9,602 | py | Python | ranking/semi_hard_triplet.py | ahmdtaha/tf_retrieval_baseline | 31b1588f888cecc1d4287f77bd046314956482d5 | [
"Apache-2.0"
] | 37 | 2019-06-01T02:11:48.000Z | 2021-12-31T06:27:42.000Z | ranking/semi_hard_triplet.py | ahmdtaha/tf_retrieval_baseline | 31b1588f888cecc1d4287f77bd046314956482d5 | [
"Apache-2.0"
] | 1 | 2019-06-21T03:20:59.000Z | 2019-09-03T14:20:04.000Z | ranking/semi_hard_triplet.py | ahmdtaha/tf_retrieval_baseline | 31b1588f888cecc1d4287f77bd046314956482d5 | [
"Apache-2.0"
] | 6 | 2019-10-11T10:21:56.000Z | 2022-03-09T06:22:57.000Z | import numbers
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import dtypes
def masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(data - axis_maximums, mask), dim,
keepdims=True) + axis_maximums
return masked_minimums
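# Sketch of the max-shift trick above:
# >>> data = tf.constant([[1., 2.], [3., 4.]])
# >>> mask = tf.constant([[1., 0.], [1., 1.]])
# >>> masked_minimum(data, mask)  # -> [[1.], [3.]] (row 0 ignores the 2.)
# masked_maximum below mirrors this with a min-shift.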
def masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(data - axis_minimums, mask), dim,
keepdims=True) + axis_minimums
return masked_maximums
def all_diffs(a, b):
""" Returns a tensor of all combinations of a - b.
Args:
a (2D tensor): A batch of vectors shaped (B1, F).
b (2D tensor): A batch of vectors shaped (B2, F).
Returns:
The matrix of all pairwise differences between all vectors in `a` and in
`b`, will be of shape (B1, B2).
Note:
For convenience, if either `a` or `b` is a `Distribution` object, its
mean is used.
"""
return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)
def cdist(a, b, metric='euclidean'):
"""Similar to scipy.spatial's cdist, but symbolic.
The currently supported metrics can be listed as `cdist.supported_metrics` and are:
- 'euclidean', although with a fudge-factor epsilon.
- 'sqeuclidean', the squared euclidean.
- 'cityblock', the manhattan or L1 distance.
Args:
a (2D tensor): The left-hand side, shaped (B1, F).
b (2D tensor): The right-hand side, shaped (B2, F).
metric (string): Which distance metric to use, see notes.
Returns:
The matrix of all pairwise distances between all vectors in `a` and in
`b`, will be of shape (B1, B2).
Note:
When a square root is taken (such as in the Euclidean case), a small
epsilon is added because the gradient of the square-root at zero is
undefined. Thus, it will never return exact zero in these cases.
"""
with tf.name_scope("cdist"):
diffs = all_diffs(a, b)
if metric == 'sqeuclidean':
return tf.reduce_sum(tf.square(diffs), axis=-1)
elif metric == 'euclidean':
return tf.sqrt(tf.reduce_sum(tf.square(diffs), axis=-1) + 1e-12)
elif metric == 'cityblock':
return tf.reduce_sum(tf.abs(diffs), axis=-1)
elif metric == 'cosine':
# https://stackoverflow.com/questions/48485373/pairwise-cosine-similarity-using-tensorflow
# normalized_input = tf.nn.l2_normalize(a, dim=1)
# Embedding are assumed to be normalized
            prod = tf.matmul(a, b, adjoint_b=True)  # transpose second matrix
return 1 - prod
else:
raise NotImplementedError(
'The following metric is not implemented by `cdist` yet: {}'.format(metric))
def pairwise_distance(feature, squared=False):
"""Computes the pairwise distance matrix with numerical stability.
output[i, j] = || feature[i, :] - feature[j, :] ||_2
Args:
feature: 2-D Tensor of size [number of data, feature dimension].
squared: Boolean, whether or not to square the pairwise distances.
Returns:
pairwise_distances: 2-D Tensor of size [number of data, number of data].
"""
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
math_ops.reduce_sum(
math_ops.square(array_ops.transpose(feature)),
axis=[0],
keepdims=True)) - 2.0 * math_ops.matmul(feature,
array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
# Get the mask where the zero distances are at.
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
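# Note: pairwise_distance relies on the identity
# ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * <x_i, x_j>,
# which is why tiny negative values (and hence the error_mask fixup above)
# can appear purely from floating-point cancellation.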
def triplet_semihard_loss(embeddings,labels, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
The loss encourages the positive distances (between a pair of embeddings with
the same labels) to be smaller than the minimum negative distance among
which are at least greater than the positive distance plus the margin constant
(called semi-hard negative) in the mini-batch. If no such negative exists,
uses the largest negative distance instead.
See: https://arxiv.org/abs/1503.03832.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
#pdist_matrix = cdist(embeddings, embeddings, metric=metric)
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
# Compute the mask.
## Is there any element with different label and is farther than me? If Yes, then there exists a semi-hard negative
pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
mask = math_ops.logical_and(
array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
tf.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = tf.cast(adjacency_not, dtype=dtypes.float32)
mask = tf.cast(mask, dtype=dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
if isinstance(margin, numbers.Real):
# diff = tf.maximum(diff + margin, 0.0)
loss_mat = pdist_matrix - semi_hard_negatives + margin
elif margin == 'soft':
# diff = tf.nn.softplus(diff)
loss_mat = pdist_matrix - semi_hard_negatives
elif margin.lower() == 'none':
pass
else:
raise NotImplementedError(
'The margin {} is not implemented in batch_hard'.format(margin))
mask_positives = tf.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
if isinstance(margin, numbers.Real):
print('Margin is real')
triplet_loss_result = math_ops.maximum(tf.boolean_mask(loss_mat, tf.cast(mask_positives, tf.bool)),
0.0)
assert_op = tf.Assert(tf.equal(tf.rank(triplet_loss_result), 1), ['Rank of image must be equal to 1.'])
with tf.control_dependencies([assert_op]):
triplet_loss = triplet_loss_result
elif margin == 'soft':
triplet_loss_result = tf.nn.softplus(tf.boolean_mask(loss_mat, tf.cast(mask_positives, tf.bool)))
assert_op = tf.Assert(tf.equal(tf.rank(triplet_loss_result), 1), ['Rank of image must be equal to 1.'])
with tf.control_dependencies([assert_op]):
triplet_loss = triplet_loss_result
elif margin.lower() == 'none':
pass
else:
raise NotImplementedError(
'The margin {} is not implemented in batch_hard'.format(margin))
return triplet_loss
| 38.408 | 117 | 0.687878 | import numbers
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import dtypes
def masked_minimum(data, mask, dim=1):
axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(data - axis_maximums, mask), dim,
keepdims=True) + axis_maximums
return masked_minimums
def masked_maximum(data, mask, dim=1):
axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(data - axis_minimums, mask), dim,
keepdims=True) + axis_minimums
return masked_maximums
def all_diffs(a, b):
return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)
def cdist(a, b, metric='euclidean'):
with tf.name_scope("cdist"):
diffs = all_diffs(a, b)
if metric == 'sqeuclidean':
return tf.reduce_sum(tf.square(diffs), axis=-1)
elif metric == 'euclidean':
return tf.sqrt(tf.reduce_sum(tf.square(diffs), axis=-1) + 1e-12)
elif metric == 'cityblock':
return tf.reduce_sum(tf.abs(diffs), axis=-1)
elif metric == 'cosine':
            prod = tf.matmul(a, b, adjoint_b=True)
return 1 - prod
else:
raise NotImplementedError(
'The following metric is not implemented by `cdist` yet: {}'.format(metric))
def pairwise_distance(feature, squared=False):
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
math_ops.reduce_sum(
math_ops.square(array_ops.transpose(feature)),
axis=[0],
keepdims=True)) - 2.0 * math_ops.matmul(feature,
array_ops.transpose(feature))
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
num_data = array_ops.shape(feature)[0]
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
def triplet_semihard_loss(embeddings,labels, margin=1.0):
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
pdist_matrix = pairwise_distance(embeddings, squared=True)
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
    pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
    mask = math_ops.logical_and(
        array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
tf.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = tf.cast(adjacency_not, dtype=dtypes.float32)
mask = tf.cast(mask, dtype=dtypes.float32)
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
if isinstance(margin, numbers.Real):
loss_mat = pdist_matrix - semi_hard_negatives + margin
elif margin == 'soft':
loss_mat = pdist_matrix - semi_hard_negatives
elif margin.lower() == 'none':
pass
else:
raise NotImplementedError(
'The margin {} is not implemented in batch_hard'.format(margin))
mask_positives = tf.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
if isinstance(margin, numbers.Real):
print('Margin is real')
triplet_loss_result = math_ops.maximum(tf.boolean_mask(loss_mat, tf.cast(mask_positives, tf.bool)),
0.0)
assert_op = tf.Assert(tf.equal(tf.rank(triplet_loss_result), 1), ['Rank of image must be equal to 1.'])
with tf.control_dependencies([assert_op]):
triplet_loss = triplet_loss_result
elif margin == 'soft':
triplet_loss_result = tf.nn.softplus(tf.boolean_mask(loss_mat, tf.cast(mask_positives, tf.bool)))
assert_op = tf.Assert(tf.equal(tf.rank(triplet_loss_result), 1), ['Rank of image must be equal to 1.'])
with tf.control_dependencies([assert_op]):
triplet_loss = triplet_loss_result
elif margin.lower() == 'none':
pass
else:
raise NotImplementedError(
'The margin {} is not implemented in batch_hard'.format(margin))
return triplet_loss
| true | true |
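A minimal usage sketch for the loss above, assuming a TF 1.x session (the code uses math_ops.to_float and other 1.x-only APIs) and l2-normalized embeddings; the shapes and labels here are arbitrary:

import numpy as np
import tensorflow as tf

emb = tf.placeholder(tf.float32, [8, 16])
lbl = tf.placeholder(tf.int32, [8])
loss = tf.reduce_mean(triplet_semihard_loss(emb, lbl, margin=1.0))

with tf.Session() as sess:
    feats = np.random.randn(8, 16).astype(np.float32)
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)  # l2-normalize
    # two examples per identity so every anchor has a positive
    print(sess.run(loss, {emb: feats, lbl: np.repeat(np.arange(4), 2)}))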
f7fdb844016bb74899c26109edc632e87e2fa88c | 8,721 | py | Python | mesa/custom_bn.py | zhuang-group/Mesa | 8b7a0db0461de7df5c99d644a60cc7704c67a02a | [
"Apache-2.0"
] | 98 | 2021-11-23T03:38:52.000Z | 2022-02-17T00:26:22.000Z | mesa/custom_bn.py | zip-group/Mesa | 8b7a0db0461de7df5c99d644a60cc7704c67a02a | [
"Apache-2.0"
] | 2 | 2021-12-02T11:45:24.000Z | 2022-01-18T19:46:57.000Z | mesa/custom_bn.py | zip-group/Mesa | 8b7a0db0461de7df5c99d644a60cc7704c67a02a | [
"Apache-2.0"
] | 6 | 2021-11-23T22:49:21.000Z | 2022-03-01T09:16:45.000Z | # Copyright (c) 2021-present, Zhuang AI Group.
# All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
if 'mesa' not in __name__:
import custom_quant
import packbit
import native
else:
from . import custom_quant
from . import native
from . import packbit
def SyncBatchNorm_forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
if not input.is_contiguous(memory_format=torch.channels_last):
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
size = int(input.numel() // input.size(1))
if size == 1 and world_size < 2:
raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count = torch.full((1,), input.numel() // input.size(1), dtype=mean.dtype, device=mean.device)
num_channels = input.shape[1]
# C, C, 1 -> (2C + 1)
combined = torch.cat([mean, invstd, count], dim=0)
# world_size * (2C + 1)
combined_list = [ torch.empty_like(combined) for k in range(world_size) ]
# Use allgather instead of allreduce since I don't trust in-place operations ..
dist.all_gather(combined_list, combined, process_group, async_op=False)
combined = torch.stack(combined_list, dim=0)
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
count_all.view(-1)
)
self.process_group = process_group
# apply element-wise normalization
out = torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
return out
def SyncBatchNorm_backward(saved_input, weight, mean, invstd, count_tensor, process_group, needs_input_grad, grad_output):
if not grad_output.is_contiguous(memory_format=torch.channels_last):
grad_output = grad_output.contiguous()
#saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
#process_group = self.process_group
grad_input = grad_weight = grad_bias = None
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
True,
needs_input_grad[0],
needs_input_grad[1]
)
if True:
# synchronizing stats used to calculate input gradient.
num_channels = sum_dy.shape[0]
combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
torch.distributed.all_reduce(
combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
# backward pass for gradient calculation
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor
)
return grad_input, grad_weight, grad_bias #, None, None, None, None, None, None
def bn_pre_forward(self, input):
self._check_input_dim(input)
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore
self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
assert self.running_mean is None or isinstance(self.running_mean, torch.Tensor)
assert self.running_var is None or isinstance(self.running_var, torch.Tensor)
running_mean = self.running_mean if not self.training or self.track_running_stats else None
running_var = self.running_var if not self.training or self.track_running_stats else None
need_sync = bn_training and input.is_cuda and hasattr(self, 'process_group')
process_group = None
world_size = 1
if need_sync:
process_group = torch.distributed.group.WORLD
if self.process_group:
process_group = self.process_group
try:
world_size = torch.distributed.get_world_size(process_group)
except AssertionError:
world_size = 1
need_sync = world_size > 1
# fallback to framework BN when synchronization is not necessary
if need_sync:
if not self.ddp_gpu_size:
raise AttributeError('SyncBatchNorm is only supported within torch.nn.parallel.DistributedDataParallel')
return exponential_average_factor, bn_training, running_mean, running_var, need_sync, process_group, world_size
class batchnorm2d(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias, mean, var, average_factor, training, need_sync, process_group, world_size, eps,
clip_val, level, iteration, ema_decay, quant_groups, shift):
if need_sync:
# currently not support
            output = SyncBatchNorm_forward(ctx, input, weight, bias, mean, var, eps, average_factor, process_group, world_size)
else:
output, save_mean, save_var, reverse = native.batch_norm_forward(input, weight, bias, mean, var, training, average_factor, eps)
if training:
ctx.bn_parameter = (weight, bias, mean, var, save_mean, save_var, reverse, eps)
custom_quant.Quant.forward(ctx, input, clip_val, level, iteration, ema_decay, quant_groups, shift)
if training:
ctx.need_sync = need_sync
return output
@staticmethod
def backward(ctx, grad_output):
if ctx.need_sync:
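            # NOTE: this sync branch references tensors (input, bn_weight,
            # bn_mean, bn_invstd, bn_count_all, bn_process_group) that
            # forward() never saves; like the "currently not support" note in
            # forward(), the distributed path is effectively unimplemented.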
grad_output, grad_bn_weight, grad_bn_bias = SyncBatchNorm_backward(input, bn_weight, bn_mean, bn_invstd, bn_count_all, \
bn_process_group, ctx.needs_input_grad[7:9], grad_output)
else:
weight, bias, running_mean, running_var, save_mean, save_var, reverse, eps = ctx.bn_parameter
# input = ctx.bn_input
input = custom_quant.Quant.restore(ctx)
grad_input, grad_weight, grad_bias = native.batch_norm_backward(input, grad_output, weight, running_mean, running_var, \
save_mean, save_var, 0, reverse)
ctx.bn_input = None
ctx.bn_parameter = None
ctx.need_sync = None
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class BatchNorm2d(nn.BatchNorm2d, custom_quant.Quant):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, args=None, logger=None, quant_groups=1):
super(BatchNorm2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
self.repr = super(BatchNorm2d, self).__repr__()
custom_quant.Quant.__init__(self, args=args, logger=logger, quant_groups=quant_groups)
self.tag = 'bn'
def __repr__(self):
return self.__str__()
def forward(self, x):
if self.enable and self.training:
            assert x.is_cuda, "CPU mode is not supported yet"
average_factor, training, mean, var, need_sync, process_group, world_size = bn_pre_forward(self, x)
y = batchnorm2d.apply(x, self.weight, self.bias, mean, var, average_factor, training, need_sync, process_group, world_size, self.eps,
self.clip_val, self.level, self.iteration, self.ema_decay, self.quant_groups, self.shift)
else:
y = super().forward(x)
return y
if __name__ == "__main__":
model = BatchNorm2d(64, args=None)
input = torch.randn(4, 100, 35, 45)
from test import test
test(model)
| 41.528571 | 145 | 0.6724 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
if 'mesa' not in __name__:
import custom_quant
import packbit
import native
else:
from . import custom_quant
from . import native
from . import packbit
def SyncBatchNorm_forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
if not input.is_contiguous(memory_format=torch.channels_last):
input = input.contiguous()
if weight is not None:
weight = weight.contiguous()
size = int(input.numel() // input.size(1))
if size == 1 and world_size < 2:
raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))
mean, invstd = torch.batch_norm_stats(input, eps)
count = torch.full((1,), input.numel() // input.size(1), dtype=mean.dtype, device=mean.device)
num_channels = input.shape[1]
combined = torch.cat([mean, invstd, count], dim=0)
combined_list = [ torch.empty_like(combined) for k in range(world_size) ]
dist.all_gather(combined_list, combined, process_group, async_op=False)
combined = torch.stack(combined_list, dim=0)
# world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
count_all.view(-1)
)
self.process_group = process_group
# apply element-wise normalization
out = torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
return out
def SyncBatchNorm_backward(saved_input, weight, mean, invstd, count_tensor, process_group, needs_input_grad, grad_output):
if not grad_output.is_contiguous(memory_format=torch.channels_last):
grad_output = grad_output.contiguous()
#saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
#process_group = self.process_group
grad_input = grad_weight = grad_bias = None
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
True,
needs_input_grad[0],
needs_input_grad[1]
)
if True:
# synchronizing stats used to calculate input gradient.
num_channels = sum_dy.shape[0]
combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
torch.distributed.all_reduce(
combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
# backward pass for gradient calculation
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
sum_dy,
sum_dy_xmu,
count_tensor
)
return grad_input, grad_weight, grad_bias #, None, None, None, None, None, None
def bn_pre_forward(self, input):
self._check_input_dim(input)
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore
self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
assert self.running_mean is None or isinstance(self.running_mean, torch.Tensor)
assert self.running_var is None or isinstance(self.running_var, torch.Tensor)
running_mean = self.running_mean if not self.training or self.track_running_stats else None
running_var = self.running_var if not self.training or self.track_running_stats else None
need_sync = bn_training and input.is_cuda and hasattr(self, 'process_group')
process_group = None
world_size = 1
if need_sync:
process_group = torch.distributed.group.WORLD
if self.process_group:
process_group = self.process_group
try:
world_size = torch.distributed.get_world_size(process_group)
except AssertionError:
world_size = 1
need_sync = world_size > 1
# fallback to framework BN when synchronization is not necessary
if need_sync:
if not self.ddp_gpu_size:
raise AttributeError('SyncBatchNorm is only supported within torch.nn.parallel.DistributedDataParallel')
return exponential_average_factor, bn_training, running_mean, running_var, need_sync, process_group, world_size
class batchnorm2d(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weight, bias, mean, var, average_factor, training, need_sync, process_group, world_size, eps,
clip_val, level, iteration, ema_decay, quant_groups, shift):
if need_sync:
# currently not support
            output = SyncBatchNorm_forward(ctx, input, weight, bias, mean, var, eps, average_factor, process_group, world_size)
else:
output, save_mean, save_var, reverse = native.batch_norm_forward(input, weight, bias, mean, var, training, average_factor, eps)
if training:
ctx.bn_parameter = (weight, bias, mean, var, save_mean, save_var, reverse, eps)
custom_quant.Quant.forward(ctx, input, clip_val, level, iteration, ema_decay, quant_groups, shift)
if training:
ctx.need_sync = need_sync
return output
@staticmethod
def backward(ctx, grad_output):
if ctx.need_sync:
grad_output, grad_bn_weight, grad_bn_bias = SyncBatchNorm_backward(input, bn_weight, bn_mean, bn_invstd, bn_count_all, \
bn_process_group, ctx.needs_input_grad[7:9], grad_output)
else:
weight, bias, running_mean, running_var, save_mean, save_var, reverse, eps = ctx.bn_parameter
# input = ctx.bn_input
input = custom_quant.Quant.restore(ctx)
grad_input, grad_weight, grad_bias = native.batch_norm_backward(input, grad_output, weight, running_mean, running_var, \
save_mean, save_var, 0, reverse)
ctx.bn_input = None
ctx.bn_parameter = None
ctx.need_sync = None
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class BatchNorm2d(nn.BatchNorm2d, custom_quant.Quant):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, args=None, logger=None, quant_groups=1):
super(BatchNorm2d, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
self.repr = super(BatchNorm2d, self).__repr__()
custom_quant.Quant.__init__(self, args=args, logger=logger, quant_groups=quant_groups)
self.tag = 'bn'
def __repr__(self):
return self.__str__()
def forward(self, x):
if self.enable and self.training:
            assert x.is_cuda, "CPU mode is not supported yet"
average_factor, training, mean, var, need_sync, process_group, world_size = bn_pre_forward(self, x)
y = batchnorm2d.apply(x, self.weight, self.bias, mean, var, average_factor, training, need_sync, process_group, world_size, self.eps,
self.clip_val, self.level, self.iteration, self.ema_decay, self.quant_groups, self.shift)
else:
y = super().forward(x)
return y
if __name__ == "__main__":
model = BatchNorm2d(64, args=None)
input = torch.randn(4, 100, 35, 45)
from test import test
test(model)
| true | true |
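A minimal smoke-test sketch for the module above, assuming Quant.__init__ tolerates args=None the way the file's own __main__ block does; with quantization presumably left disabled this exercises the stock nn.BatchNorm2d fallback, while the custom batchnorm2d.apply path additionally needs a CUDA tensor and the repository's real args object:

import torch

bn = BatchNorm2d(16, args=None).train()   # quantization presumably disabled
x = torch.randn(4, 16, 8, 8, requires_grad=True)
y = bn(x)                # falls back to nn.BatchNorm2d.forward when not enabled
y.mean().backward()
print(x.grad.shape)      # torch.Size([4, 16, 8, 8])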
f7fdb8f8135d355ca7cd3f450dc773ff7936b7e1 | 240 | py | Python | wtforms_webwidgets/__init__.py | mdornseif/wtforms-webwidgets | 457bc20ba82c5b1c7638b58cf2fb9cd4f46f1a10 | [
"MIT"
] | 4 | 2015-07-29T13:21:56.000Z | 2021-04-27T12:26:16.000Z | wtforms_webwidgets/__init__.py | mdornseif/wtforms-webwidgets | 457bc20ba82c5b1c7638b58cf2fb9cd4f46f1a10 | [
"MIT"
] | 2 | 2015-06-09T05:59:35.000Z | 2018-01-23T18:09:27.000Z | wtforms_webwidgets/__init__.py | mdornseif/wtforms-webwidgets | 457bc20ba82c5b1c7638b58cf2fb9cd4f46f1a10 | [
"MIT"
] | 3 | 2015-06-08T13:44:20.000Z | 2019-08-08T10:34:12.000Z | """
WTForms-Webwidgets
==================
This package aims to eventually provide comprehensive widgets for all the
common web UI frameworks.
Currently this module contains widgets for:
* Bootstrap
"""
from .common import *
| 18.461538 | 81 | 0.704167 |
from .common import *
| true | true |
f7fdb94d3d1973655d4a1b1decb590cc7e7b1385 | 453 | py | Python | qvapay/models/invoice.py | codeshard/qvapay-python | 6483345e8b1236aacd9252743634e5f12ab3187b | [
"MIT"
] | 1 | 2021-08-29T01:09:08.000Z | 2021-08-29T01:09:08.000Z | qvapay/models/invoice.py | codeshard/qvapay-python | 6483345e8b1236aacd9252743634e5f12ab3187b | [
"MIT"
] | null | null | null | qvapay/models/invoice.py | codeshard/qvapay-python | 6483345e8b1236aacd9252743634e5f12ab3187b | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from uuid import UUID
from dataclasses_json import config, dataclass_json
@dataclass_json
@dataclass
class Invoice:
"""
QvaPay invoice
"""
app_id: UUID
amount: float
description: str
remote_id: str
signed = None
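    # Note: "signed" has no annotation, so it is a plain class attribute rather
    # than a dataclass field, and "transation_uuid" presumably mirrors the
    # (misspelled) field name the QvaPay API actually returns.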
transaction_uuid: UUID = field(metadata=config(field_name="transation_uuid"))
url: str
signed_url: str = field(metadata=config(field_name="signedUrl"))
| 20.590909 | 81 | 0.717439 | from dataclasses import dataclass, field
from uuid import UUID
from dataclasses_json import config, dataclass_json
@dataclass_json
@dataclass
class Invoice:
app_id: UUID
amount: float
description: str
remote_id: str
signed = None
transaction_uuid: UUID = field(metadata=config(field_name="transation_uuid"))
url: str
signed_url: str = field(metadata=config(field_name="signedUrl"))
| true | true |
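A short round-trip sketch showing what the field metadata above buys; the UUIDs and amounts are made up, and only the Invoice class from this file is assumed:

import json
from uuid import uuid4

inv = Invoice(app_id=uuid4(), amount=25.0, description="demo",
              remote_id="order-1", transaction_uuid=uuid4(),
              url="https://qvapay.com/pay/x",
              signed_url="https://qvapay.com/pay/x?signed=1")
payload = json.loads(inv.to_json())
# dataclasses-json applies the field_name overrides on serialization:
assert "transation_uuid" in payload and "signedUrl" in payload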
f7fdb9f37cf41c70aad93b792f4495cbd6ca2290 | 826 | py | Python | scheduled_bots/disease_ontology/disease_ontology/deprecated_code/bot_ng.py | egonw/scheduled-bots | 1f00461d03e341568075fdcd90adb878ccaccbc3 | [
"MIT"
] | 6 | 2017-05-04T01:04:26.000Z | 2022-03-04T12:22:17.000Z | scheduled_bots/disease_ontology/disease_ontology/deprecated_code/bot_ng.py | egonw/scheduled-bots | 1f00461d03e341568075fdcd90adb878ccaccbc3 | [
"MIT"
] | 55 | 2017-03-14T21:16:44.000Z | 2022-03-02T12:39:14.000Z | scheduled_bots/disease_ontology/disease_ontology/deprecated_code/bot_ng.py | egonw/scheduled-bots | 1f00461d03e341568075fdcd90adb878ccaccbc3 | [
"MIT"
] | 13 | 2017-02-10T21:40:06.000Z | 2022-01-18T01:27:52.000Z | import json
import pprint
import time
with open("doid.json", "r") as read_file:
data = json.load(read_file)
i = 0
for graphs in data["graphs"]:
if graphs["id"] == 'http://purl.obolibrary.org/obo/doid.owl':
print('================')
for node in graphs["nodes"]:
if node['type'] == 'CLASS':
i += 1
try:
print('doid', node['id'])
print('label', node['lbl'])
if 'meta' in node.keys():
if "synonyms" in node['meta'].keys():
for synonym in node['meta']['synonyms']:
print(synonym['pred'], ":", synonym["val"])
                except KeyError:  # some nodes lack 'lbl' or synonym metadata
pprint.pprint(node)
time.sleep(5)
print(i)
| 30.592593 | 75 | 0.428571 | import json
import pprint
import time
with open("doid.json", "r") as read_file:
data = json.load(read_file)
i = 0
for graphs in data["graphs"]:
if graphs["id"] == 'http://purl.obolibrary.org/obo/doid.owl':
print('================')
for node in graphs["nodes"]:
if node['type'] == 'CLASS':
i += 1
try:
print('doid', node['id'])
print('label', node['lbl'])
if 'meta' in node.keys():
if "synonyms" in node['meta'].keys():
for synonym in node['meta']['synonyms']:
print(synonym['pred'], ":", synonym["val"])
                except KeyError:
pprint.pprint(node)
time.sleep(5)
print(i)
| true | true |
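The same traversal written defensively with dict.get, a sketch that makes the missing-key cases explicit instead of relying on a broad except clause (doid.json and its graph layout are assumed unchanged):

import json

with open("doid.json", "r") as read_file:
    data = json.load(read_file)

for graph in data.get("graphs", []):
    for node in graph.get("nodes", []):
        if node.get("type") != "CLASS":
            continue
        synonyms = node.get("meta", {}).get("synonyms", [])
        print(node.get("id"), node.get("lbl"), len(synonyms))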
f7fdbb705526be01bcfb410cef4df170c6da28a6 | 9,877 | py | Python | kfac/autograd_hacks.py | shyhuai/kfac_pytorch | f5a99366fa94345697432a8aabdc5d370f68d06f | [
"MIT"
] | null | null | null | kfac/autograd_hacks.py | shyhuai/kfac_pytorch | f5a99366fa94345697432a8aabdc5d370f68d06f | [
"MIT"
] | null | null | null | kfac/autograd_hacks.py | shyhuai/kfac_pytorch | f5a99366fa94345697432a8aabdc5d370f68d06f | [
"MIT"
] | 1 | 2020-12-30T03:06:40.000Z | 2020-12-30T03:06:40.000Z | """
Library for extracting interesting quantites from autograd, see README.md
Not thread-safe because of module-level variables
Notation:
o: number of output classes (exact Hessian), number of Hessian samples (sampled Hessian)
n: batch-size
do: output dimension (output channels for convolution)
di: input dimension (input channels for convolution)
Hi: per-example Hessian of matmul, shaped as matrix of [dim, dim], indices have been row-vectorized
Hi_bias: per-example Hessian of bias
Oh, Ow: output height, output width (convolution)
Kh, Kw: kernel height, kernel width (convolution)
Jb: batch output Jacobian of matmul, output sensitivity for each (example, class) pair, [o, n, ....]
Jb_bias: as above, but for bias
A, activations: inputs into current layer
B, backprops: backprop values (aka Lop aka Jacobian-vector product) observed at current layer
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
_supported_layers = ['Linear', 'Conv2d'] # Supported layer class types
_hooks_disabled: bool = False # work-around for https://github.com/pytorch/pytorch/issues/25723
_enforce_fresh_backprop: bool = False # global switch to catch double backprop errors on Hessian computation
def add_hooks(model: nn.Module) -> None:
"""
Adds hooks to model to save activations and backprop values.
The hooks will
1. save activations into param.activations during forward pass
2. append backprops to params.backprops_list during backward pass.
Call "remove_hooks(model)" to disable this.
Args:
model:
"""
global _hooks_disabled
_hooks_disabled = False
handles = []
for layer in model.modules():
if _layer_type(layer) in _supported_layers:
handles.append(layer.register_forward_hook(_capture_activations))
handles.append(layer.register_backward_hook(_capture_backprops))
model.__dict__.setdefault('autograd_hacks_hooks', []).extend(handles)
def remove_hooks(model: nn.Module) -> None:
"""
Remove hooks added by add_hooks(model)
"""
assert model == 0, "not working, remove this after fix to https://github.com/pytorch/pytorch/issues/25723"
if not hasattr(model, 'autograd_hacks_hooks'):
print("Warning, asked to remove hooks, but no hooks found")
else:
for handle in model.autograd_hacks_hooks:
handle.remove()
del model.autograd_hacks_hooks
def disable_hooks() -> None:
"""
Globally disable all hooks installed by this library.
"""
global _hooks_disabled
_hooks_disabled = True
def enable_hooks() -> None:
"""the opposite of disable_hooks()"""
global _hooks_disabled
_hooks_disabled = False
def is_supported(layer: nn.Module) -> bool:
"""Check if this layer is supported"""
return _layer_type(layer) in _supported_layers
def _layer_type(layer: nn.Module) -> str:
return layer.__class__.__name__
def _capture_activations(layer: nn.Module, input: List[torch.Tensor], output: torch.Tensor):
"""Save activations into layer.activations in forward pass"""
if _hooks_disabled:
return
assert _layer_type(layer) in _supported_layers, "Hook installed on unsupported layer, this shouldn't happen"
setattr(layer, "activations", input[0].detach())
def _capture_backprops(layer: nn.Module, _input, output):
"""Append backprop to layer.backprops_list in backward pass."""
global _enforce_fresh_backprop
if _hooks_disabled:
return
if _enforce_fresh_backprop:
assert not hasattr(layer, 'backprops_list'), "Seeing result of previous backprop, use clear_backprops(model) to clear"
_enforce_fresh_backprop = False
if not hasattr(layer, 'backprops_list'):
setattr(layer, 'backprops_list', [])
layer.backprops_list.append(output[0].detach())
def clear_backprops(model: nn.Module) -> None:
"""Delete layer.backprops_list in every layer."""
for layer in model.modules():
if hasattr(layer, 'backprops_list'):
del layer.backprops_list
def compute_grad1(model: nn.Module, loss_type: str = 'mean') -> None:
"""
    Compute per-example gradients and save them under 'param.grad1'. Must be called after loss.backward()
Args:
model:
loss_type: either "mean" or "sum" depending whether backpropped loss was averaged or summed over batch
"""
assert loss_type in ('sum', 'mean')
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
assert len(layer.backprops_list) == 1, "Multiple backprops detected, make sure to call clear_backprops(model)"
A = layer.activations
n = A.shape[0]
if loss_type == 'mean':
B = layer.backprops_list[0] * n
else: # loss_type == 'sum':
B = layer.backprops_list[0]
if layer_type == 'Linear':
setattr(layer.weight, 'grad1', torch.einsum('ni,nj->nij', B, A))
if layer.bias is not None:
setattr(layer.bias, 'grad1', B)
elif layer_type == 'Conv2d':
A = torch.nn.functional.unfold(A, layer.kernel_size, dilation=layer.dilation, padding=layer.padding, stride=layer.stride)
#A = torch.nn.functional.unfold(A, layer.kernel_size)
B = B.reshape(n, -1, A.shape[-1])
grad1 = torch.einsum('ijk,ilk->ijl', B, A)
shape = [n] + list(layer.weight.shape)
setattr(layer.weight, 'grad1', grad1.reshape(shape))
if layer.bias is not None:
setattr(layer.bias, 'grad1', torch.sum(B, dim=2))
def compute_hess(model: nn.Module,) -> None:
"""Save Hessian under param.hess for each param in the model"""
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
if layer_type == 'Linear':
A = layer.activations
B = torch.stack(layer.backprops_list)
n = A.shape[0]
o = B.shape[0]
A = torch.stack([A] * o)
Jb = torch.einsum("oni,onj->onij", B, A).reshape(n*o, -1)
H = torch.einsum('ni,nj->ij', Jb, Jb) / n
setattr(layer.weight, 'hess', H)
if layer.bias is not None:
setattr(layer.bias, 'hess', torch.einsum('oni,onj->ij', B, B)/n)
elif layer_type == 'Conv2d':
Kh, Kw = layer.kernel_size
di, do = layer.in_channels, layer.out_channels
A = layer.activations.detach()
A = torch.nn.functional.unfold(A, (Kh, Kw)) # n, di * Kh * Kw, Oh * Ow
n = A.shape[0]
B = torch.stack([Bt.reshape(n, do, -1) for Bt in layer.backprops_list]) # o, n, do, Oh*Ow
o = B.shape[0]
A = torch.stack([A] * o) # o, n, di * Kh * Kw, Oh*Ow
Jb = torch.einsum('onij,onkj->onik', B, A) # o, n, do, di * Kh * Kw
Hi = torch.einsum('onij,onkl->nijkl', Jb, Jb) # n, do, di*Kh*Kw, do, di*Kh*Kw
Jb_bias = torch.einsum('onij->oni', B)
Hi_bias = torch.einsum('oni,onj->nij', Jb_bias, Jb_bias)
setattr(layer.weight, 'hess', Hi.mean(dim=0))
if layer.bias is not None:
setattr(layer.bias, 'hess', Hi_bias.mean(dim=0))
def backprop_hess(output: torch.Tensor, hess_type: str) -> None:
"""
Call backprop 1 or more times to get values needed for Hessian computation.
Args:
output: prediction of neural network (ie, input of nn.CrossEntropyLoss())
hess_type: type of Hessian propagation, "CrossEntropy" results in exact Hessian for CrossEntropy
Returns:
"""
assert hess_type in ('LeastSquares', 'CrossEntropy')
global _enforce_fresh_backprop
n, o = output.shape
_enforce_fresh_backprop = True
if hess_type == 'CrossEntropy':
batch = F.softmax(output, dim=1)
mask = torch.eye(o).expand(n, o, o)
diag_part = batch.unsqueeze(2).expand(n, o, o) * mask
outer_prod_part = torch.einsum('ij,ik->ijk', batch, batch)
hess = diag_part - outer_prod_part
assert hess.shape == (n, o, o)
for i in range(n):
hess[i, :, :] = symsqrt(hess[i, :, :])
hess = hess.transpose(0, 1)
elif hess_type == 'LeastSquares':
hess = []
assert len(output.shape) == 2
batch_size, output_size = output.shape
id_mat = torch.eye(output_size)
for out_idx in range(output_size):
hess.append(torch.stack([id_mat[out_idx]] * batch_size))
    for i in range(o):
        output.backward(hess[i], retain_graph=True)
def symsqrt(a, cond=None, return_rank=False, dtype=torch.float32):
"""Symmetric square root of a positive semi-definite matrix.
See https://github.com/pytorch/pytorch/issues/25481"""
s, u = torch.symeig(a, eigenvectors=True)
cond_dict = {torch.float32: 1e3 * 1.1920929e-07, torch.float64: 1E6 * 2.220446049250313e-16}
if cond in [None, -1]:
cond = cond_dict[dtype]
above_cutoff = (abs(s) > cond * torch.max(abs(s)))
psigma_diag = torch.sqrt(s[above_cutoff])
u = u[:, above_cutoff]
B = u @ torch.diag(psigma_diag) @ u.t()
if return_rank:
return B, len(psigma_diag)
else:
return B
| 34.414634 | 133 | 0.64311 |
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
_supported_layers = ['Linear', 'Conv2d']
_hooks_disabled: bool = False
_enforce_fresh_backprop: bool = False
def add_hooks(model: nn.Module) -> None:
global _hooks_disabled
_hooks_disabled = False
handles = []
for layer in model.modules():
if _layer_type(layer) in _supported_layers:
handles.append(layer.register_forward_hook(_capture_activations))
handles.append(layer.register_backward_hook(_capture_backprops))
model.__dict__.setdefault('autograd_hacks_hooks', []).extend(handles)
def remove_hooks(model: nn.Module) -> None:
assert model == 0, "not working, remove this after fix to https://github.com/pytorch/pytorch/issues/25723"
if not hasattr(model, 'autograd_hacks_hooks'):
print("Warning, asked to remove hooks, but no hooks found")
else:
for handle in model.autograd_hacks_hooks:
handle.remove()
del model.autograd_hacks_hooks
def disable_hooks() -> None:
global _hooks_disabled
_hooks_disabled = True
def enable_hooks() -> None:
global _hooks_disabled
_hooks_disabled = False
def is_supported(layer: nn.Module) -> bool:
return _layer_type(layer) in _supported_layers
def _layer_type(layer: nn.Module) -> str:
return layer.__class__.__name__
def _capture_activations(layer: nn.Module, input: List[torch.Tensor], output: torch.Tensor):
if _hooks_disabled:
return
assert _layer_type(layer) in _supported_layers, "Hook installed on unsupported layer, this shouldn't happen"
setattr(layer, "activations", input[0].detach())
def _capture_backprops(layer: nn.Module, _input, output):
global _enforce_fresh_backprop
if _hooks_disabled:
return
if _enforce_fresh_backprop:
assert not hasattr(layer, 'backprops_list'), "Seeing result of previous backprop, use clear_backprops(model) to clear"
_enforce_fresh_backprop = False
if not hasattr(layer, 'backprops_list'):
setattr(layer, 'backprops_list', [])
layer.backprops_list.append(output[0].detach())
def clear_backprops(model: nn.Module) -> None:
for layer in model.modules():
if hasattr(layer, 'backprops_list'):
del layer.backprops_list
def compute_grad1(model: nn.Module, loss_type: str = 'mean') -> None:
assert loss_type in ('sum', 'mean')
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
assert len(layer.backprops_list) == 1, "Multiple backprops detected, make sure to call clear_backprops(model)"
A = layer.activations
n = A.shape[0]
if loss_type == 'mean':
B = layer.backprops_list[0] * n
else: # loss_type == 'sum':
B = layer.backprops_list[0]
if layer_type == 'Linear':
setattr(layer.weight, 'grad1', torch.einsum('ni,nj->nij', B, A))
if layer.bias is not None:
setattr(layer.bias, 'grad1', B)
elif layer_type == 'Conv2d':
A = torch.nn.functional.unfold(A, layer.kernel_size, dilation=layer.dilation, padding=layer.padding, stride=layer.stride)
#A = torch.nn.functional.unfold(A, layer.kernel_size)
B = B.reshape(n, -1, A.shape[-1])
grad1 = torch.einsum('ijk,ilk->ijl', B, A)
shape = [n] + list(layer.weight.shape)
setattr(layer.weight, 'grad1', grad1.reshape(shape))
if layer.bias is not None:
setattr(layer.bias, 'grad1', torch.sum(B, dim=2))
def compute_hess(model: nn.Module,) -> None:
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
if layer_type == 'Linear':
A = layer.activations
B = torch.stack(layer.backprops_list)
n = A.shape[0]
o = B.shape[0]
A = torch.stack([A] * o)
Jb = torch.einsum("oni,onj->onij", B, A).reshape(n*o, -1)
H = torch.einsum('ni,nj->ij', Jb, Jb) / n
setattr(layer.weight, 'hess', H)
if layer.bias is not None:
setattr(layer.bias, 'hess', torch.einsum('oni,onj->ij', B, B)/n)
elif layer_type == 'Conv2d':
Kh, Kw = layer.kernel_size
di, do = layer.in_channels, layer.out_channels
A = layer.activations.detach()
A = torch.nn.functional.unfold(A, (Kh, Kw)) # n, di * Kh * Kw, Oh * Ow
n = A.shape[0]
B = torch.stack([Bt.reshape(n, do, -1) for Bt in layer.backprops_list]) # o, n, do, Oh*Ow
o = B.shape[0]
A = torch.stack([A] * o) # o, n, di * Kh * Kw, Oh*Ow
Jb = torch.einsum('onij,onkj->onik', B, A) # o, n, do, di * Kh * Kw
Hi = torch.einsum('onij,onkl->nijkl', Jb, Jb) # n, do, di*Kh*Kw, do, di*Kh*Kw
Jb_bias = torch.einsum('onij->oni', B)
Hi_bias = torch.einsum('oni,onj->nij', Jb_bias, Jb_bias)
setattr(layer.weight, 'hess', Hi.mean(dim=0))
if layer.bias is not None:
setattr(layer.bias, 'hess', Hi_bias.mean(dim=0))
def backprop_hess(output: torch.Tensor, hess_type: str) -> None:
assert hess_type in ('LeastSquares', 'CrossEntropy')
global _enforce_fresh_backprop
n, o = output.shape
_enforce_fresh_backprop = True
if hess_type == 'CrossEntropy':
batch = F.softmax(output, dim=1)
mask = torch.eye(o).expand(n, o, o)
diag_part = batch.unsqueeze(2).expand(n, o, o) * mask
outer_prod_part = torch.einsum('ij,ik->ijk', batch, batch)
hess = diag_part - outer_prod_part
assert hess.shape == (n, o, o)
for i in range(n):
hess[i, :, :] = symsqrt(hess[i, :, :])
hess = hess.transpose(0, 1)
elif hess_type == 'LeastSquares':
hess = []
assert len(output.shape) == 2
batch_size, output_size = output.shape
id_mat = torch.eye(output_size)
for out_idx in range(output_size):
hess.append(torch.stack([id_mat[out_idx]] * batch_size))
    for i in range(o):
        output.backward(hess[i], retain_graph=True)
def symsqrt(a, cond=None, return_rank=False, dtype=torch.float32):
s, u = torch.symeig(a, eigenvectors=True)
cond_dict = {torch.float32: 1e3 * 1.1920929e-07, torch.float64: 1E6 * 2.220446049250313e-16}
if cond in [None, -1]:
cond = cond_dict[dtype]
above_cutoff = (abs(s) > cond * torch.max(abs(s)))
psigma_diag = torch.sqrt(s[above_cutoff])
u = u[:, above_cutoff]
B = u @ torch.diag(psigma_diag) @ u.t()
if return_rank:
return B, len(psigma_diag)
else:
return B
| true | true |
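An end-to-end sketch of the intended call order for the library above; the model and data are toy placeholders:

import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Sequential(nn.Linear(10, 5))
add_hooks(model)                       # install forward/backward hooks
x, y = torch.randn(4, 10), torch.randint(0, 5, (4,))
loss = F.cross_entropy(model(x), y)    # mean-reduced by default
loss.backward()
compute_grad1(model, loss_type='mean')
print(model[0].weight.grad1.shape)     # torch.Size([4, 5, 10]): per-example grads
clear_backprops(model)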
f7fdbc862b000826917fa0aea9b1fd061d433f57 | 2,344 | py | Python | cut.py | Jhilbertxtu/JDComments_Analyze | 9a93c7cfc572509fce5e0f82702d8d55d029ef8f | [
"MIT"
] | 2 | 2021-03-01T13:32:22.000Z | 2021-07-28T13:37:43.000Z | cut.py | Jhilbertxtu/JDComments_Analyze | 9a93c7cfc572509fce5e0f82702d8d55d029ef8f | [
"MIT"
] | null | null | null | cut.py | Jhilbertxtu/JDComments_Analyze | 9a93c7cfc572509fce5e0f82702d8d55d029ef8f | [
"MIT"
] | null | null | null | '''
Preprocess the review corpus
Data source: JD.com phone category > all products on the first page when sorted by comment count (60 products) -> the 5-star and 1-star review texts (see single_spiders/JDcomments)
Strip punctuation and English, then read the file line by line and segment it with jieba
Remove empty lines, duplicates, and stop words
Then load into MySQL and deduplicate once more, producing dis_neg_cut.txt and dis_pos_cut.txt
Pitfall:
negative reviews are usually genuinely bad, but positive reviews are not necessarily good!!!
'''
import jieba
import jieba.analyse
import codecs,re
def pdata(inFile,outFile,st,good):
'''
处理评论数据,good是特别为隐藏在好评中的差评准备的
:param inFile:
:param outFile:
:param st:
:param good:
:return:
'''
f=codecs.open(inFile,'r',encoding='utf8')
target=codecs.open(outFile,'w',encoding='utf8')
i=1
line=f.readline()
while line:
line=fix(line,good)
if line:
line=cut(line,st)
if line:
target.writelines (line + '\n')
i=i + 1
line=f.readline()
def cut(line,st):
'''
    Remove empty lines, duplicate words, and stop words
:param line:
:param st:
:return:
'''
segList=jieba.cut (line, cut_all=False)
segSentence=[]
for word in segList:
if word != '\t' and word !='\n' and word not in st and word not in segSentence:
segSentence.append(word)
if len(segSentence)>0:
return ' '.join(segSentence)
def fix(l,good):
'''
    Remove English, digits, punctuation, and the negative reviews hidden in the positive corpus
:param l:
:param good:
:return:
'''
line=l.strip ()
if good==1:
        # drop the negative reviews hiding in the positive-review data!!!
del_line=['不好看','坑','骗','垃圾','死机','太差','磨损','不舒服','卡的要死','反应慢','后悔','闪屏','黑屏','信号差','缺点','差评',
'不好用','不爽','噪音','卡死','失望','可怜','二手','生气','不满意','烦人','山寨','退货','差劲','无法','不太方便','不适合','上当','妈的','赔偿']
for s in del_line:
if s in line:
return None
    # remove English letters and digits from the text
line=re.sub ("[a-zA-Z0-9]", "", line)
    # remove Chinese and English punctuation from the text
line=re.sub ("[\s+\.\!\/_,$%^*(+\"\';:“”.]+|[+——!,。??、~@#¥%……&*()~ ;℃: ̄▽)]+", "", line)
return line
def main():
jieba.load_userdict('datas/phone_dict.txt')
stopkey=[w.strip () for w in codecs.open ('datas/stopwords1598.txt', 'r', encoding='utf-8').readlines ()]
    # n_in is the negative corpus, p_in is the positive corpus
# n_in='datas/phone_comments_neg.txt'
# p_in='datas/phone_comments_pos.txt'
# n_out='datas/phone_comments_neg_cut.txt'
# p_out='datas/phone_comments_pos_cut.txt'
# pdata (n_in, n_out,stopkey,0)
# pdata (p_in, p_out,stopkey,1)
test='datas/test.txt'
test_out='datas/test_cut.txt'
pdata(test,test_out,stopkey,0)
if __name__=='__main__':
main() | 27.904762 | 118 | 0.572099 | import jieba
import jieba.analyse
import codecs,re
def pdata(inFile,outFile,st,good):
f=codecs.open(inFile,'r',encoding='utf8')
target=codecs.open(outFile,'w',encoding='utf8')
i=1
line=f.readline()
while line:
line=fix(line,good)
if line:
line=cut(line,st)
if line:
target.writelines (line + '\n')
i=i + 1
line=f.readline()
def cut(line,st):
segList=jieba.cut (line, cut_all=False)
segSentence=[]
for word in segList:
if word != '\t' and word !='\n' and word not in st and word not in segSentence:
segSentence.append(word)
if len(segSentence)>0:
return ' '.join(segSentence)
def fix(l,good):
line=l.strip ()
if good==1:
del_line=['不好看','坑','骗','垃圾','死机','太差','磨损','不舒服','卡的要死','反应慢','后悔','闪屏','黑屏','信号差','缺点','差评',
'不好用','不爽','噪音','卡死','失望','可怜','二手','生气','不满意','烦人','山寨','退货','差劲','无法','不太方便','不适合','上当','妈的','赔偿']
for s in del_line:
if s in line:
return None
line=re.sub ("[a-zA-Z0-9]", "", line)
line=re.sub ("[\s+\.\!\/_,$%^*(+\"\';:“”.]+|[+——!,。??、~@#¥%……&*()~ ;℃: ̄▽)]+", "", line)
return line
def main():
jieba.load_userdict('datas/phone_dict.txt')
stopkey=[w.strip () for w in codecs.open ('datas/stopwords1598.txt', 'r', encoding='utf-8').readlines ()]
    # n_in is the negative corpus, p_in is the positive corpus
# n_in='datas/phone_comments_neg.txt'
# p_in='datas/phone_comments_pos.txt'
# n_out='datas/phone_comments_neg_cut.txt'
# p_out='datas/phone_comments_pos_cut.txt'
# pdata (n_in, n_out,stopkey,0)
# pdata (p_in, p_out,stopkey,1)
test='datas/test.txt'
test_out='datas/test_cut.txt'
pdata(test,test_out,stopkey,0)
if __name__=='__main__':
main() | true | true |
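A self-contained sketch of the segmentation-plus-filtering step the script performs, with a tiny inline stop-word set standing in for stopwords1598.txt:

import jieba

stopwords = {'的', '了', '很'}   # stand-in for datas/stopwords1598.txt
line = '手机很好用,屏幕清晰'
tokens = []
for word in jieba.cut(line, cut_all=False):
    if word.strip() and word not in stopwords and word not in tokens:
        tokens.append(word)
print(' '.join(tokens))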
f7fdbced9c81ca99b1680779c870d1cc2fc3e5d0 | 2,062 | py | Python | angr/analyses/decompiler/peephole_optimizations/conv_shl_shr.py | BA7JCM/angr | 187a713c35759d998d93dfc5280630976d42d717 | [
"BSD-2-Clause"
] | null | null | null | angr/analyses/decompiler/peephole_optimizations/conv_shl_shr.py | BA7JCM/angr | 187a713c35759d998d93dfc5280630976d42d717 | [
"BSD-2-Clause"
] | null | null | null | angr/analyses/decompiler/peephole_optimizations/conv_shl_shr.py | BA7JCM/angr | 187a713c35759d998d93dfc5280630976d42d717 | [
"BSD-2-Clause"
] | null | null | null | from ailment.expression import Convert, BinaryOp, Const
from .base import PeepholeOptimizationExprBase
class ConvShlShr(PeepholeOptimizationExprBase):
__slots__ = ()
NAME = "(expr << P) >> Q => (expr & mask) >> R"
    expr_classes = (BinaryOp, )  # this peephole only matches BinaryOp expressions
def optimize(self, expr: BinaryOp):
# (Conv(M->N, expr) << P) >> Q ==> (Conv(M->N, expr) & bitmask) >> (Q-P), where
# Q >= P, and
# M < N, and
# bitmask = 0b('1' * (N - P))
if expr.op == "Shr" and isinstance(expr.operands[1], Const):
q = expr.operands[1].value
expr_b = expr.operands[0]
if isinstance(expr_b, BinaryOp) and expr_b.op == "Shl" and isinstance(expr_b.operands[1], Const):
p = expr_b.operands[1].value
expr_a = expr_b.operands[0]
if q >= p and isinstance(expr_a, Convert) and not expr_a.is_signed:
m = expr_a.from_bits
n = expr_a.to_bits
if m < n and n >= p:
bitmask = (1 << (n - p)) - 1
and_expr = BinaryOp(
None,
'And',
(
Convert(expr_a.idx, m, n, False, expr_a.operand, **expr_a.tags),
Const(None, None, bitmask, n),
),
False,
variable=None,
variable_offset=None,
**expr.tags,
)
return BinaryOp(
None,
'Shr',
(
and_expr,
Const(None, None, q - p, and_expr.bits),
),
False,
**expr.tags,
)
return None
| 38.90566 | 109 | 0.387003 | from ailment.expression import Convert, BinaryOp, Const
from .base import PeepholeOptimizationExprBase
class ConvShlShr(PeepholeOptimizationExprBase):
__slots__ = ()
NAME = "(expr << P) >> Q => (expr & mask) >> R"
expr_classes = (BinaryOp, )
def optimize(self, expr: BinaryOp):
if expr.op == "Shr" and isinstance(expr.operands[1], Const):
q = expr.operands[1].value
expr_b = expr.operands[0]
if isinstance(expr_b, BinaryOp) and expr_b.op == "Shl" and isinstance(expr_b.operands[1], Const):
p = expr_b.operands[1].value
expr_a = expr_b.operands[0]
if q >= p and isinstance(expr_a, Convert) and not expr_a.is_signed:
m = expr_a.from_bits
n = expr_a.to_bits
if m < n and n >= p:
bitmask = (1 << (n - p)) - 1
and_expr = BinaryOp(
None,
'And',
(
Convert(expr_a.idx, m, n, False, expr_a.operand, **expr_a.tags),
Const(None, None, bitmask, n),
),
False,
variable=None,
variable_offset=None,
**expr.tags,
)
return BinaryOp(
None,
'Shr',
(
and_expr,
Const(None, None, q - p, and_expr.bits),
),
False,
**expr.tags,
)
return None
| true | true |
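A quick numeric check of the rewrite above, with assumed parameters: an unsigned widening from 8 to 32 bits, P = 4 and Q = 7, so the mask keeps the low 32 - 4 = 28 bits and the residual shift is Q - P = 3:

expr = 0xAB                     # 8-bit value, zero-extended to 32 bits
p, q, n = 4, 7, 32
lhs = ((expr << p) & 0xFFFFFFFF) >> q
mask = (1 << (n - p)) - 1       # 0x0FFFFFFF
rhs = (expr & mask) >> (q - p)
assert lhs == rhs == 0x15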
f7fdbd064c1f4d795998df200938ea7655036464 | 5,382 | py | Python | azure-cognitiveservices-search-customimagesearch/azure/cognitiveservices/search/customimagesearch/models/media_object_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-cognitiveservices-search-customimagesearch/azure/cognitiveservices/search/customimagesearch/models/media_object_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-cognitiveservices-search-customimagesearch/azure/cognitiveservices/search/customimagesearch/models/media_object_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .creative_work import CreativeWork
class MediaObject(CreativeWork):
"""Defines a media object.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ImageObject
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar read_link: The URL that returns this resource.
:vartype read_link: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar image: An image of the item.
:vartype image:
~azure.cognitiveservices.search.customimagesearch.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar alternate_name: An alias for the item
:vartype alternate_name: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider:
list[~azure.cognitiveservices.search.customimagesearch.models.Thing]
:ivar text: Text content of this creative work
:vartype text: str
:ivar content_url: Original URL to retrieve the source (file) for the
media object (e.g the source URL for the image).
:vartype content_url: str
:ivar host_page_url: URL of the page that hosts the media object.
:vartype host_page_url: str
:ivar content_size: Size of the media object content (use format "value
unit" e.g "1024 B").
:vartype content_size: str
    :ivar encoding_format: Encoding format (e.g., mp3, mp4, jpeg).
:vartype encoding_format: str
:ivar host_page_display_url: Display URL of the page that hosts the media
object.
:vartype host_page_display_url: str
:ivar width: The width of the media object, in pixels.
:vartype width: int
:ivar height: The height of the media object, in pixels.
:vartype height: int
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'read_link': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'alternate_name': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'content_url': {'readonly': True},
'host_page_url': {'readonly': True},
'content_size': {'readonly': True},
'encoding_format': {'readonly': True},
'host_page_display_url': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'read_link': {'key': 'readLink', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'alternate_name': {'key': 'alternateName', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
'host_page_url': {'key': 'hostPageUrl', 'type': 'str'},
'content_size': {'key': 'contentSize', 'type': 'str'},
'encoding_format': {'key': 'encodingFormat', 'type': 'str'},
'host_page_display_url': {'key': 'hostPageDisplayUrl', 'type': 'str'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
}
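    # Maps the "_type" discriminator value to the concrete subclass that
    # msrest instantiates when deserializing polymorphic responses.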
_subtype_map = {
'_type': {'ImageObject': 'ImageObject'}
}
def __init__(self, **kwargs) -> None:
super(MediaObject, self).__init__(**kwargs)
self.content_url = None
self.host_page_url = None
self.content_size = None
self.encoding_format = None
self.host_page_display_url = None
self.width = None
self.height = None
self._type = 'MediaObject'
| 40.164179 | 79 | 0.603865 |
from .creative_work import CreativeWork
class MediaObject(CreativeWork):
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'read_link': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'alternate_name': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'text': {'readonly': True},
'content_url': {'readonly': True},
'host_page_url': {'readonly': True},
'content_size': {'readonly': True},
'encoding_format': {'readonly': True},
'host_page_display_url': {'readonly': True},
'width': {'readonly': True},
'height': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'read_link': {'key': 'readLink', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'alternate_name': {'key': 'alternateName', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'text': {'key': 'text', 'type': 'str'},
'content_url': {'key': 'contentUrl', 'type': 'str'},
'host_page_url': {'key': 'hostPageUrl', 'type': 'str'},
'content_size': {'key': 'contentSize', 'type': 'str'},
'encoding_format': {'key': 'encodingFormat', 'type': 'str'},
'host_page_display_url': {'key': 'hostPageDisplayUrl', 'type': 'str'},
'width': {'key': 'width', 'type': 'int'},
'height': {'key': 'height', 'type': 'int'},
}
_subtype_map = {
'_type': {'ImageObject': 'ImageObject'}
}
def __init__(self, **kwargs) -> None:
super(MediaObject, self).__init__(**kwargs)
self.content_url = None
self.host_page_url = None
self.content_size = None
self.encoding_format = None
self.host_page_display_url = None
self.width = None
self.height = None
self._type = 'MediaObject'
| true | true |
f7fdbdb07b5f903ceb6245f909088d02120a539c | 891 | py | Python | ksif/util/spinner.py | MingyoJung/ksif | ccb96cc8c0fd4588ddb3e2ee65596691af914f86 | [
"MIT"
] | 2 | 2018-07-31T07:45:42.000Z | 2018-09-07T07:01:54.000Z | ksif/util/spinner.py | MingyoJung/ksif | ccb96cc8c0fd4588ddb3e2ee65596691af914f86 | [
"MIT"
] | 2 | 2018-07-30T10:48:31.000Z | 2018-08-02T11:33:24.000Z | ksif/util/spinner.py | MingyoJung/ksif | ccb96cc8c0fd4588ddb3e2ee65596691af914f86 | [
"MIT"
] | 1 | 2019-03-01T05:31:21.000Z | 2019-03-01T05:31:21.000Z | """
Date : 2018. 9. 1
Author : Jiwoo Park
"""
import time
import threading
import itertools
import sys
class Spinner():
spinner_char = itertools.cycle(['-','/','|','\\'])
def __init__(self):
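        # stop_running signals the spinner thread to exit; spin_thread runs
        # init_spin until that event is set.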
self.stop_running = threading.Event()
self.spin_thread = threading.Thread(target=self.init_spin)
def start(self):
self.spin_thread.start()
def stop(self):
self.stop_running.set()
self.spin_thread.join()
def init_spin(self):
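        # Write the next spinner character, pause, then erase it with a
        # backspace so the glyph appears to rotate in place.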
while not self.stop_running.is_set():
sys.stdout.write(next(self.spinner_char))
sys.stdout.flush()
time.sleep(0.25)
sys.stdout.write('\b')
def spinner(func):
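    # Decorator: animate a spinner on stdout while the wrapped function runs,
    # stopping it once the function returns.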
spin = Spinner()
def inner():
spin.start()
func()
spin.stop()
return inner
@spinner
def do_work():
time.sleep(3)
if __name__ == "__main__":
do_work()
| 19.8 | 66 | 0.589226 |
import time
import threading
import itertools
import sys
class Spinner():
spinner_char = itertools.cycle(['-','/','|','\\'])
def __init__(self):
self.stop_running = threading.Event()
self.spin_thread = threading.Thread(target=self.init_spin)
def start(self):
self.spin_thread.start()
def stop(self):
self.stop_running.set()
self.spin_thread.join()
def init_spin(self):
while not self.stop_running.is_set():
sys.stdout.write(next(self.spinner_char))
sys.stdout.flush()
time.sleep(0.25)
sys.stdout.write('\b')
def spinner(func):
spin = Spinner()
def inner():
spin.start()
func()
spin.stop()
return inner
@spinner
def do_work():
time.sleep(3)
if __name__ == "__main__":
do_work()
| true | true |
f7fdbff52622f6c30779279369d2cd6798075004 | 1,781 | py | Python | cached_path/schemes/s3.py | allenai/cached_path | f89e348e76913183d2bf90cc70db8ca87f4227e2 | [
"Apache-2.0"
] | 11 | 2021-09-10T19:25:40.000Z | 2022-01-13T07:44:59.000Z | cached_path/schemes/s3.py | allenai/cached_path | f89e348e76913183d2bf90cc70db8ca87f4227e2 | [
"Apache-2.0"
] | 31 | 2021-09-09T17:05:56.000Z | 2022-03-25T16:10:15.000Z | cached_path/schemes/s3.py | allenai/cached_path | f89e348e76913183d2bf90cc70db8ca87f4227e2 | [
"Apache-2.0"
] | 2 | 2021-09-10T19:22:25.000Z | 2021-09-15T02:28:04.000Z | """
AWS S3.
"""
from typing import IO, Optional, Tuple
import boto3
import botocore
from cached_path.common import _split_cloud_path
from cached_path.schemes.scheme_client import SchemeClient
from cached_path.tqdm import Tqdm
class S3Client(SchemeClient):
recoverable_errors = SchemeClient.recoverable_errors + (
botocore.exceptions.EndpointConnectionError,
)
scheme = "s3"
def __init__(self, resource: str) -> None:
super().__init__(resource)
bucket_name, s3_path = S3Client.split_s3_path(resource)
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
self.s3_object = s3_resource.Object(bucket_name, s3_path)
def get_etag(self) -> Optional[str]:
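        # Load the object's metadata (a HEAD request); S3 reports a missing key
        # as a ClientError with code 404, which is re-raised as FileNotFoundError.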
try:
self.s3_object.load()
except botocore.exceptions.ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(self.resource))
else:
raise
return self.s3_object.e_tag
def get_resource(self, temp_file: IO) -> None:
progress = Tqdm.tqdm(
unit="iB",
unit_scale=True,
unit_divisor=1024,
total=self.s3_object.content_length,
desc="downloading",
)
self.s3_object.download_fileobj(temp_file, Callback=progress.update)
progress.close()
@staticmethod
def split_s3_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "s3")
| 30.706897 | 88 | 0.632791 |
from typing import IO, Optional, Tuple
import boto3
import botocore
from cached_path.common import _split_cloud_path
from cached_path.schemes.scheme_client import SchemeClient
from cached_path.tqdm import Tqdm
class S3Client(SchemeClient):
recoverable_errors = SchemeClient.recoverable_errors + (
botocore.exceptions.EndpointConnectionError,
)
scheme = "s3"
def __init__(self, resource: str) -> None:
super().__init__(resource)
bucket_name, s3_path = S3Client.split_s3_path(resource)
session = boto3.session.Session()
if session.get_credentials() is None:
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
self.s3_object = s3_resource.Object(bucket_name, s3_path)
def get_etag(self) -> Optional[str]:
try:
self.s3_object.load()
except botocore.exceptions.ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(self.resource))
else:
raise
return self.s3_object.e_tag
def get_resource(self, temp_file: IO) -> None:
progress = Tqdm.tqdm(
unit="iB",
unit_scale=True,
unit_divisor=1024,
total=self.s3_object.content_length,
desc="downloading",
)
self.s3_object.download_fileobj(temp_file, Callback=progress.update)
progress.close()
@staticmethod
def split_s3_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "s3")
| true | true |
f7fdc07b196b86a4aeedff990553d210fcd7a25c | 1,126 | py | Python | selenium_python_docker_test/test_selenium_remote_webdriver.py | WommyInStandingPosition/YtbDataApiRelated | 4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d | [
"MIT"
] | null | null | null | selenium_python_docker_test/test_selenium_remote_webdriver.py | WommyInStandingPosition/YtbDataApiRelated | 4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d | [
"MIT"
] | null | null | null | selenium_python_docker_test/test_selenium_remote_webdriver.py | WommyInStandingPosition/YtbDataApiRelated | 4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
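# Connect to a remote Selenium node (this assumes a standalone Firefox node is
# listening on localhost:4445) and scrape the free proxy table.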
command_executor = 'http://localhost:4445/wd/hub'
driver = webdriver.Remote(command_executor, desired_capabilities=DesiredCapabilities.FIREFOX)
driver.get("https://sslproxies.org/")
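# Wait for the table rows to become visible, then collect column 1 (IP
# addresses) and column 2 (ports) via XPath.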
ips = [my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(
EC.visibility_of_all_elements_located((By.XPATH,
"//table[@class='table table-striped table-bordered']//tbody//tr/td[position() = 1]")))]
ports = [my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(
EC.visibility_of_all_elements_located((By.XPATH,
"//table[@class='table table-striped table-bordered']//tbody//tr/td[position() = 2]")))]
driver.quit()
proxies = []
for i in range(0, len(ips)):
proxies.append(ips[i] + ':' + ports[i])
print(proxies) | 51.181818 | 131 | 0.714032 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
command_executor = 'http://localhost:4445/wd/hub'
driver = webdriver.Remote(command_executor, desired_capabilities=DesiredCapabilities.FIREFOX)
driver.get("https://sslproxies.org/")
ips = [my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(
EC.visibility_of_all_elements_located((By.XPATH,
"//table[@class='table table-striped table-bordered']//tbody//tr/td[position() = 1]")))]
ports = [my_elem.get_attribute("innerHTML") for my_elem in WebDriverWait(driver, 5).until(
EC.visibility_of_all_elements_located((By.XPATH,
"//table[@class='table table-striped table-bordered']//tbody//tr/td[position() = 2]")))]
driver.quit()
proxies = []
for i in range(0, len(ips)):
proxies.append(ips[i] + ':' + ports[i])
print(proxies) | true | true |
f7fdc15f0f71c624018f6e0e41114b4262a7eb97 | 39,327 | py | Python | vnpy/app/multifactor_strategy/engine.py | AMAZED-FINTECH/vnpy-Amazed-Fintech | b0c938a66e518e1ed87d828566419ee176e0959a | [
"MIT"
] | 10 | 2019-06-18T04:52:38.000Z | 2019-12-29T03:11:15.000Z | vnpy/app/multifactor_strategy/engine.py | AMAZED-FINTECH/vnpy-Amazed-Fintech | b0c938a66e518e1ed87d828566419ee176e0959a | [
"MIT"
] | null | null | null | vnpy/app/multifactor_strategy/engine.py | AMAZED-FINTECH/vnpy-Amazed-Fintech | b0c938a66e518e1ed87d828566419ee176e0959a | [
"MIT"
] | null | null | null | """"""
import importlib
import os
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable
from datetime import datetime, timedelta
from threading import Thread
from queue import Queue, Empty
from copy import copy, deepcopy
import time
import psutil
import os
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_BAR,
EVENT_ACCOUNT
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset,
Status
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.database import database_manager
# from vnpy.trader.rqdata import rqdata_client
from .base import (
APP_NAME,
EVENT_MULTIFACTOR_LOG,
EVENT_MULTIFACTOR_STRATEGY,
EVENT_MULTIFACTOR_STOPORDER,
EngineType,
StopOrder,
StopOrderStatus,
STOPORDER_PREFIX
)
from .template import MultiFactorTemplate
from .converter import OffsetConverter
from .DBMongo import dbMongo
STOP_STATUS_MAP = {
Status.SUBMITTING: StopOrderStatus.WAITING,
Status.NOTTRADED: StopOrderStatus.WAITING,
Status.PARTTRADED: StopOrderStatus.TRIGGERED,
Status.ALLTRADED: StopOrderStatus.TRIGGERED,
Status.CANCELLED: StopOrderStatus.CANCELLED,
Status.REJECTED: StopOrderStatus.CANCELLED
}
class MultiFactorEngine(BaseEngine):
"""Cta引擎,提供Cta功能与主引擎的交互"""
engine_type = EngineType.LIVE # live trading engine
    # Configuration file names
setting_filename = "multifactor_setting.json"
data_filename = "multifactor_data.json"
setting_dbname = "multifactor_strategy_setting"
data_dbname = "multifactor_strategy_data"
account_id = "mytest"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
"""初始化,与其他模块一样,提供与主引擎交互的方法"""
super(MultiFactorEngine, self).__init__(
main_engine, event_engine, APP_NAME)
        # Setting dict and data dict
self.strategy_setting = {} # strategy_name: dict
self.strategy_data = {} # strategy_name: dict
        # Strategy classes and strategy instances
        self.classes = {} # class_name: strategy_class
self.strategies = {} # strategy_name: strategy
        # Which strategy owns which vt_orderid
self.orderid_strategy_map = {} # vt_orderid: strategy
        # Each strategy name maps to its order ids (one strategy, many ids)
self.strategy_orderid_map = defaultdict(
set) # strategy_name: orderid list
        # Count of stop orders placed
self.stop_order_count = 0 # for generating stop_orderid
        # Stop orders keyed by id
self.stop_orders = {} # stop_orderid: stop_order
        # Strategy initialization thread
self.init_thread = None
        # Strategy initialization queue
self.init_queue = Queue()
self.rq_client = None
self.rq_symbols = set()
        # Strategy trades
self.vt_tradeids = set() # for filtering duplicate trade
        # Offset conversion
self.offset_converter = OffsetConverter(self.main_engine)
        # Custom additions:
        # MongoDB database worker
self.db_mongo = dbMongo()
self.db_thread = None
self.db_queue = Queue()
self.db_active = False
self.db_count = 0
def init_engine(self):
"""
        Initialize the engine:
        load all strategy classes,
        load all strategy settings,
        load all strategy data,
        and register events.
        """
        # Start the database worker thread
self.db_start()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
self.write_log("CTA策略引擎初始化成功")
def account_id_change(self, new_id):
self.account_id = new_id
def init_dbmongo(self, name=None, password=None, ip="localhost", port="27017"):
self.db_name = name
self.db_pwd = password
self.db_mongo = dbMongo(name, password, ip, port)
def close(self):
"""
        Stop all strategies.
"""
self.stop_all_strategies()
def register_event(self):
"""注册事件,tick, order, trade, position, 少了一个account?"""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_BAR, self.process_bar_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
def process_tick_event(self, event: Event):
"""和cta策略不同,每个Tick都推送到所有的策略中去"""
tick = event.data
        # In the multi-factor model, every strategy needs every tick
strategies = self.strategies.values()
if not strategies:
return
        # First check local stop orders against this tick
self.check_stop_order(tick)
        # Push the tick to each inited strategy's on_tick, where bars are synthesized for the strategy's own use
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_bar_event(self, event: Event):
"""处理bar事件,主要是向订阅了bar的策略推送"""
bar = deepcopy(event.data)
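        # A deep copy is taken, presumably so strategies and the DB writer
        # cannot mutate the shared event payload.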
strategies = self.strategies.values()
if not strategies:
return
        # Bars do not trigger stop order checks
        # self.check_stop_order(bar)
        # Push the bar to each strategy's on_bar, where minute bars are aggregated into larger bars for the strategy's own use
for strategy in strategies:
#if strategy.inited:
            # When logging, the send is recorded first, then the strategy receives it
# self.write_log("engine process Bar_Data:" + str(bar.__dict__), strategy)
self.call_strategy_func(strategy, strategy.on_bar, bar)
# =================================
            # During testing this is written both to the database and to the log;
            # in live trading only the database is written
d = deepcopy(bar.__dict__)
d["account_id"] = self.account_id
d["strategy_name"] = strategy.strategy_name
d["exchange"] = d["exchange"].value
d["interval"] = d["interval"].value
flt = {
"vt_symbol": d["vt_symbol"],
"interval": d["interval"],
"datetime": d["datetime"],
}
self.db_queue.put(["update", self.account_id, "Bar_Data", d, flt])
# =================================
def process_order_event(self, event: Event):
"""处理order事件"""
order = event.data
d = deepcopy(order.__dict__)
        # Update the offset converter first
self.offset_converter.update_order(order)
        # Look up the strategy that owns this order id
strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
if not strategy:
self.write_log("非程序化策略订单:" + str(d))
return
# =================================
        # During testing this is written both to the database and to the log;
        # in live trading only the database is written
d["account_id"] = self.account_id
d["strategy_name"] = strategy.strategy_name
d["exchange"] = d["exchange"].value
d["type"] = d["type"].value
d["direction"] = d["direction"].value
d["offset"] = d["offset"].value
d["status"] = d["status"].value
flt = {
"vt_orderid": d["vt_orderid"],
"volume": d["volume"],
"status": d["status"],
}
self.db_queue.put(["update", self.account_id, "Order_Data", d, flt])
self.write_log("Order_Data:" + str(d), strategy)
# =================================
# Remove vt_orderid if order is no longer active.
        # If the order is fully traded or cancelled, drop its id.
        # First fetch all of this strategy's order ids
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if order.vt_orderid in vt_orderids and not order.is_active():
vt_orderids.remove(order.vt_orderid)
# For server stop order, call strategy on_stop_order function
        # If it is a stop order, push the stop order back to the owning strategy
if order.type == OrderType.STOP:
so = StopOrder(
vt_symbol=order.vt_symbol,
direction=order.direction,
offset=order.offset,
price=order.price,
volume=order.volume,
stop_orderid=order.vt_orderid,
strategy_name=strategy.strategy_name,
status=STOP_STATUS_MAP[order.status],
vt_orderids=[order.vt_orderid],
)
self.call_strategy_func(strategy, strategy.on_stop_order, so)
# Call strategy on_order function
        # Finally, stop order or not, always call the strategy's on_order
self.call_strategy_func(strategy, strategy.on_order, order)
def process_trade_event(self, event: Event):
"""处理成交事件"""
trade = event.data
d = deepcopy(trade.__dict__)
# Filter duplicate trade push
        # i.e. ignore trades whose ids were already seen during this session
if trade.vt_tradeid in self.vt_tradeids:
return
        # Record the traded order id in the engine
self.vt_tradeids.add(trade.vt_tradeid)
        # Update the offset converter with the trade
self.offset_converter.update_trade(trade)
        # Look up the strategy for this trade
strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
if not strategy:
self.write_log("非程序化策略成交:" + str(d))
return
# =================================
        # During testing this is written both to the database and to the log;
        # in live trading only the database is written
d["account_id"] = self.account_id
d["strategy_name"] = strategy.strategy_name
d["exchange"] = d["exchange"].value
d["direction"] = d["direction"].value
d["offset"] = d["offset"].value
flt = {
"vt_orderid": d["vt_orderid"],
"vt_tradeid": d["vt_tradeid"],
}
self.db_queue.put(["update", self.account_id, "Trade_Data", d, flt])
self.write_log("Trade_Data:" + str(d), strategy)
# =================================
        # A long trade adds to the position with a positive sign; a short trade subtracts with a negative sign
if trade.direction == Direction.LONG:
strategy.pos[trade.vt_symbol] += trade.volume
else:
strategy.pos[trade.vt_symbol] -= trade.volume
        # The position variables are persisted locally after every update
self.sync_strategy_data(strategy)
        # Call the strategy's on_trade callback
self.call_strategy_func(strategy, strategy.on_trade, trade)
        # Put a strategy event: whenever any strategy state changes
        # (as in double_ma_strategy's on_start, on_bar, on_init, on_stop), put_strategy_event is triggered
self.put_strategy_event(strategy)
def process_position_event(self, event: Event):
"""处理仓位 事件"""
position = event.data
d = deepcopy(position.__dict__)
        # Simply convert and update the position; that is all
self.offset_converter.update_position(position)
# =================================
        # During testing this is written both to the database and to the log;
        # in live trading only the database is written
d["account_id"] = self.account_id
d["exchange"] = d["exchange"].value
d["direction"] = d["direction"].value
d["datetime"] = copy(datetime.now().strftime("%Y-%m-%d %H:%M:%S.%fZ"))
if d["volume"] > 0:
self.db_queue.put(["insert", self.account_id, "Position_Data", d])
self.write_log("Position_Data:" + str(d))
# =================================
def process_account_event(self, event: Event):
"""处理账户 事件"""
account = event.data
d = deepcopy(account.__dict__)
# =================================
        # During testing this is written both to the database and to the log;
        # in live trading only the database is written
d["account_id"] = self.account_id
d["datetime"] = copy(datetime.now().strftime("%Y-%m-%d %H:%M:%S.%fZ"))
if d["balance"] > 0:
self.db_queue.put(["insert", self.account_id, "Account_Data", d])
self.write_log("Account_Data:" + str(d))
# =================================
def check_stop_order(self, tick: TickData):
"""检查停止单,每次收到tick的时候都要检查"""
        # For every pending local stop order
for stop_order in list(self.stop_orders.values()):
            # If the stop order's vt_symbol differs from the tick's
if stop_order.vt_symbol != tick.vt_symbol:
                # Move on to the next order
continue
            # To guarantee the order can be placed, check the tick's limit-up price and level-5 quotes; if neither exists, skip
if not tick.limit_up and not tick.bid_price_5:
continue
            # Long stop order: the tick price crosses above the stop price.
            # A buy (LONG + OPEN) stop order opens when price breaks upward;
            # a cover (LONG + CLOSE) stop order closes when price breaks upward, i.e. a stop loss
long_triggered = (
stop_order.direction == Direction.LONG and tick.last_price >= stop_order.price
)
            # Short stop order: the tick price crosses below the stop price.
            # A short (SHORT + OPEN) stop order opens when price breaks downward;
            # a sell (SHORT + CLOSE) stop order closes when price breaks downward, i.e. a stop loss
short_triggered = (
stop_order.direction == Direction.SHORT and tick.last_price <= stop_order.price
)
            # If either trigger condition fires
if long_triggered or short_triggered:
                # Which strategy placed this stop order
strategy = self.strategies[stop_order.strategy_name]
                # To get executed immediately after the stop order is
                # triggered, use the limit price if available, otherwise
                # use ask_price_5 or bid_price_5.
                # If there is a limit-up/limit-down price, order at it; otherwise use the level-5 quote
if stop_order.direction == Direction.LONG:
if tick.limit_up:
price = tick.limit_up
else:
price = tick.ask_price_5
else:
if tick.limit_down:
price = tick.limit_down
else:
price = tick.bid_price_5
                # Get the matching contract (exchange and symbol) from the main engine
contract = self.main_engine.get_contract(stop_order.vt_symbol)
                # A local stop order is essentially converted into a limit order
vt_orderids = self.send_limit_order(
strategy,
contract,
stop_order.direction,
stop_order.offset,
price,
stop_order.volume,
stop_order.lock
)
                # A successful send returns the order ids
# Update stop order status if placed successfully
if vt_orderids:
# Remove from relation map.
                    # The local stop order has been replaced by a limit order, so remove it
self.stop_orders.pop(stop_order.stop_orderid)
                    # Get the strategy's active order ids
strategy_vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if stop_order.stop_orderid in strategy_vt_orderids:
strategy_vt_orderids.remove(stop_order.stop_orderid)
                    # Change stop order status to triggered and update the strategy.
stop_order.status = StopOrderStatus.TRIGGERED
stop_order.vt_orderids = vt_orderids
                    # Call on_stop_order
self.call_strategy_func(
strategy, strategy.on_stop_order, stop_order
)
self.put_stop_order_event(stop_order)
def send_server_order(
self,
strategy: MultiFactorTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
type: OrderType,
lock: bool
):
"""
        Send a new order to the server.
        Some servers support server-side stop orders and some do not; use this when they do.
"""
# Create request and send order.
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=type,
price=price,
volume=volume,
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.orderid_strategy_map[vt_orderid] = strategy
self.strategy_orderid_map[strategy.strategy_name].add(vt_orderid)
return vt_orderids
def send_limit_order(
self,
strategy: MultiFactorTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
        Send a limit order to the server.
        Limit orders can always be sent to the server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.LIMIT,
lock
)
def send_server_stop_order(
self,
strategy: MultiFactorTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
        Send a stop order to the server.
        Should only be used if stop orders are supported
        by the trading server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.STOP,
lock
)
def send_local_stop_order(
self,
strategy: MultiFactorTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
        Create a new local stop order,
        used when the trading server does not support stop orders.
"""
self.stop_order_count += 1
stop_orderid = f"{STOPORDER_PREFIX}.{self.stop_order_count}"
stop_order = StopOrder(
vt_symbol=strategy.vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop_orderid=stop_orderid,
strategy_name=strategy.strategy_name,
lock=lock
)
self.stop_orders[stop_orderid] = stop_order
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
vt_orderids.add(stop_orderid)
self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
self.put_stop_order_event(stop_order)
return stop_orderid
def cancel_server_order(self, strategy: MultiFactorTemplate, vt_orderid: str):
"""
        Cancel an existing server order by vt_orderid.
"""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def cancel_local_stop_order(self, strategy: MultiFactorTemplate, stop_orderid: str):
"""
        Cancel a local stop order.
"""
stop_order = self.stop_orders.get(stop_orderid, None)
if not stop_order:
return
strategy = self.strategies[stop_order.strategy_name]
# Remove from relation map.
self.stop_orders.pop(stop_orderid)
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if stop_orderid in vt_orderids:
vt_orderids.remove(stop_orderid)
# Change stop order status to cancelled and update to strategy.
stop_order.status = StopOrderStatus.CANCELLED
self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
self.put_stop_order_event(stop_order)
def send_order(
self,
strategy: MultiFactorTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool,
lock: bool
):
"""
        Send an order on behalf of a strategy.
"""
contract = self.main_engine.get_contract(strategy.vt_symbol)
if not contract:
self.write_log(f"委托失败,找不到合约:{strategy.vt_symbol}", strategy)
return ""
# Round order price and volume to nearest incremental value
        # The price must first be aligned to the contract's minimum price tick so it is tradable
price = round_to(price, contract.pricetick)
volume = round_to(volume, contract.min_volume)
        # Stop order
if stop:
            # If the server supports stop orders, send a server order; note stop_supported is set in the contract query callback
if contract.stop_supported:
return self.send_server_stop_order(strategy, contract, direction, offset, price, volume, lock)
            # If the server does not, place a local stop order
else:
return self.send_local_stop_order(strategy, direction, offset, price, volume, lock)
else:
            # If not a stop order, it is a limit order
return self.send_limit_order(strategy, contract, direction, offset, price, volume, lock)
def cancel_order(self, strategy: MultiFactorTemplate, vt_orderid: str):
"""
        Cancel an order:
        local stop orders and server orders
        are dispatched separately.
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_local_stop_order(strategy, vt_orderid)
else:
self.cancel_server_order(strategy, vt_orderid)
def cancel_all(self, strategy: MultiFactorTemplate):
"""
        Cancel all active orders of a strategy in one call.
"""
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if not vt_orderids:
return
for vt_orderid in copy(vt_orderids):
self.cancel_order(strategy, vt_orderid)
def get_engine_type(self):
"""获取引擎模式,默认实盘模式"""
return self.engine_type
def load_bar(self, vt_symbol: str, days: int, interval: Interval, callback: Callable[[BarData], None]):
"""载入历史bar"""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
        # Query bars from RQData by default; if not found, load from the database.
        # TODO
        # CTA loads history from RQData here, which must change: there is no RQData account!
        # Suggested fix: send query_history through the main engine and let the gateway call back with the data.
        # OKEX history comes from OKEX and futures history from the database; since they differ,
        # this cannot be unified here and should be dispatched per gateway.
bars = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not bars:
bars = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
for bar in bars:
callback(bar)
def load_tick(self, vt_symbol: str, days: int, callback: Callable[[TickData], None]):
"""同上"""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
ticks = database_manager.load_tick_data(
symbol=symbol,
exchange=exchange,
start=start,
end=end,
)
for tick in ticks:
callback(tick)
def call_strategy_func(self, strategy: MultiFactorTemplate, func: Callable, params: Any = None):
"""
        Call a strategy function and catch any exception raised.
        Inputs:
        strategy - the strategy instance (a template subclass);
        func - the callback, e.g. strategy.on_tick or strategy.on_order;
        params - the callback argument, e.g. a tick for on_tick or an order for on_order.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
def add_strategy(self, class_name: str, strategy_name: str, setting: dict):
"""
Add a new strategy.
        class_name - the strategy class (template) name, e.g. DoubleMaStrategy;
        strategy_name - a unique instance name (one class with two different
        settings gives two distinct instances);
        setting - the strategy parameters.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
        # The strategy class, i.e. the template
strategy_class = self.classes[class_name]
        # Create the strategy instance and store it locally under its unique name.
        # setting updates the strategy params (parameters), not its variables;
        # the params are applied at construction time
strategy = strategy_class(self, strategy_name, setting)
self.strategies[strategy_name] = strategy
# Update to setting file.
        # Persist the strategy setting,
        # adding the class name alongside the parameters
self.update_strategy_setting(strategy_name, setting)
        # Put a strategy event
self.put_strategy_event(strategy)
def init_strategy(self, strategy_name: str):
"""
Init a strategy.
"""
        # Queue the strategy for initialization, so that too many strategies are not initialized at the same time
self.init_queue.put(strategy_name)
        # Start the worker thread if it is not already running
if not self.init_thread:
self.init_thread = Thread(target=self._init_strategy)
self.init_thread.start()
def _init_strategy(self):
"""
Init strategies in queue.
"""
        # Internal initialization worker; not exposed externally
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
            # Setting inited=True here would be needed for the query_history data callback
# strategy.inited = True
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
            # Call each strategy's on_init callback
#self.call_strategy_func(strategy, strategy.on_init)
#self.write_log("engine start flag")
# Restore strategy data(variables)
            # Fetch the stored strategy data;
            # "data" here means the strategy's variables
data = self.strategy_data.get(strategy_name, None)
if data:
                # Restore the strategy's variables
for name in strategy.variables:
value = data.get(name, None)
if value:
                        # Equivalent to strategy.<name> = value
setattr(strategy, name, value)
self.call_strategy_func(strategy, strategy.on_init)
# Subscribe market data
            # Due to the local OKEX Futures mechanism, all subscriptions are
            # done in the gateway rather than here
# Put event to update init completed status.
            # Mark the strategy as initialized
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
        # Initialization takes a while, so release the thread once finished to avoid wasting memory when many strategies are loaded
self.init_thread = None
def start_strategy(self, strategy_name: str):
"""
        Start a strategy.
"""
        # Look up the strategy
strategy = self.strategies[strategy_name]
        # The strategy must be initialized before it can start
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
print(f"策略{strategy.strategy_name}启动失败,请先初始化" + str(strategy.__dict__))
return
        # If the strategy is already running, do not start it again
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
        # Start the strategy
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
        # Put a strategy event
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
        Stop a strategy.
"""
        # Look up the strategy
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
        # Call the strategy's on_stop function
self.call_strategy_func(strategy, strategy.on_stop)
        # Set the strategy's trading status to False first
strategy.trading = False
        # Cancel all orders of the strategy
self.cancel_all(strategy)
        # Update the GUI by putting an event
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
        Edit the parameters of a strategy.
        Rarely needed unless the strategy is stopped.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
        Remove a strategy.
"""
strategy = self.strategies[strategy_name]
        # Stop the strategy before removing it, otherwise this errors out
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
        # Remove the stored setting
self.remove_strategy_setting(strategy_name)
        # Remove from the active orderid map
if strategy_name in self.strategy_orderid_map:
vt_orderids = self.strategy_orderid_map.pop(strategy_name)
# Remove vt_orderid strategy map
for vt_orderid in vt_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def load_strategy_class(self):
"""
Load strategy class from source code.
        Strategy classes are loaded dynamically from two locations:
        """
        # Two paths are provided; in live trading strategies may live in either
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.multifactor_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
        Load strategy classes from a given folder.
        Every file ending in .py is treated as a strategy module.
"""
        # dirpath is the directory path; filenames are all the module file names inside
for dirpath, dirnames, filenames in os.walk(str(path)):
            # Extract the strategy modules
for filename in filenames:
                # Ends with .py; note __init__.py also matches, which is sloppy, though it is excluded further down
if filename.endswith(".py"):
                    # The module name, i.e. the .py file name
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
        A thin local wrapper around importlib.
"""
try:
module = importlib.import_module(module_name)
            # Inspect everything inside the module
for name in dir(module):
                # Get the attribute's value
value = getattr(module, name)
                # 1. it is a class,
                # 2. it subclasses MultiFactorTemplate,
                # 3. it is not MultiFactorTemplate itself
if (isinstance(value, type) and
issubclass(value, MultiFactorTemplate) and
value is not MultiFactorTemplate):
                    # Keyed by the class's own name, e.g. AtrRsiStrategy
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
"""
        Load strategy data (the strategies' variables) from the database.
"""
results = self.db_mongo.dbQuery(self.account_id, self.data_dbname, {})
for result in results:
self.strategy_data[result["strategy_name"]] = result["data"]
# self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: MultiFactorTemplate):
"""
        Sync strategy data into the database. This is I/O that runs on
        every trade, so treat it with care.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
d = {
"strategy_name": strategy.strategy_name,
"data": data,
}
flt = {"strategy_name": strategy.strategy_name}
self.db_queue.put(["update", self.account_id, self.data_dbname, d, flt, True])
#save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
        Return the names of the loaded strategy classes.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
        Get the default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
        Get the parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
        Initialize all strategies.
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
        Start all strategies.
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
        Stop all strategies.
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def load_strategy_setting(self):
"""
        Load the strategy settings.
        Heavily modified from upstream: settings now come from the database.
"""
results = self.db_mongo.dbQuery(self.account_id, self.setting_dbname, {})
for result in results:
self.add_strategy(
result["class_name"],
result["strategy_name"],
result["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
        Persist the updated strategy setting.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"setting": setting,
}
d = {
"strategy_name": strategy_name,
"class_name": strategy.__class__.__name__,
"setting": setting,
}
flt = {
"strategy_name": strategy_name,
"class_name": strategy.__class__.__name__,
}
self.db_queue.put(["update", self.account_id, self.setting_dbname, d, flt, True])
def remove_strategy_setting(self, strategy_name: str):
"""
        Remove the stored strategy setting.
"""
if strategy_name not in self.strategy_setting:
return
        # Delete the strategy setting from the database
self.strategy_setting.pop(strategy_name)
flt = {
"strategy_name": strategy_name
}
self.db_mongo.dbDelete(self.account_id, self.setting_dbname, flt)
def put_stop_order_event(self, stop_order: StopOrder):
"""
Put an event to update stop order status.
"""
        # Emit a stop order event
event = Event(EVENT_MULTIFACTOR_STOPORDER, stop_order)
self.event_engine.put(event)
def put_strategy_event(self, strategy: MultiFactorTemplate):
"""
Put an event to update strategy status.
"""
        # Emit a strategy event
data = strategy.get_data()
event = Event(EVENT_MULTIFACTOR_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: MultiFactorTemplate = None):
"""
Create cta engine log event.
"""
if strategy:
msg = f"{strategy.strategy_name} -> {msg}"
d = {
"datetime": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
"gateway_name": "CtaStrategy",
"msg": msg
}
self.db_queue.put(["insert", self.account_id, "Log", d])
def send_email(self, msg: str, strategy: MultiFactorTemplate = None):
"""
        Send an email to the default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "CTA策略引擎"
self.main_engine.send_email(subject, msg)
def db_start(self):
"""开启DB的线程"""
self.db_active = True
self.db_thread = Thread(target=self.db_run)
self.db_thread.start()
def db_stop(self):
self.db_active = False
def db_run(self):
"""数据库线程的运行"""
while self.db_active:
try:
task = self.db_queue.get(timeout=1)
if task[0] == "update":
dbName = task[1]
collectionName = task[2]
d = task[3]
flt = task[4]
self.db_mongo.dbUpdate(dbName, collectionName, d, flt, True)
elif task[0] == "insert":
dbName = task[1]
collectionName = task[2]
d = task[3]
self.db_mongo.dbInsert(dbName, collectionName, d)
# task_type, data = task
except Empty:
self.db_count += 1
                # Roughly once an hour
if self.db_count >= 3600:
while True:
try:
info = psutil.virtual_memory()
                            self.write_log('Restart: memory in use: ' + str(psutil.Process(os.getpid()).memory_info().rss))
                            self.write_log('Restart: total memory: ' + str(info.total))
                            self.write_log('Restart: memory percent: ' + str(info.percent))
                            self.write_log('Restart: CPU count: ' + str(psutil.cpu_count()))
                            self.write_log("Database restart starting!!!")
                            # Stop the run loop first
self.db_active = False
                            # Close the MongoDB connection cleanly
self.db_mongo.dbClient.close()
self.db_mongo = None
                            # Recreate the dbMongo() client
self.db_mongo = dbMongo(self.db_name, self.db_pwd)
                            # Restart the run loop
self.db_active = True
self.write_log("数据库重启成功!!!")
self.db_count = 0
break
except Exception as e:
self.write_log("数据库问题" + str(e))
except Exception as e:
self.write_log(str(e))
| 32.025244 | 110 | 0.57383 |
import importlib
import os
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable
from datetime import datetime, timedelta
from threading import Thread
from queue import Queue, Empty
from copy import copy, deepcopy
import time
import psutil
import os
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_BAR,
EVENT_ACCOUNT
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset,
Status
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.database import database_manager
from .base import (
APP_NAME,
EVENT_MULTIFACTOR_LOG,
EVENT_MULTIFACTOR_STRATEGY,
EVENT_MULTIFACTOR_STOPORDER,
EngineType,
StopOrder,
StopOrderStatus,
STOPORDER_PREFIX
)
from .template import MultiFactorTemplate
from .converter import OffsetConverter
from .DBMongo import dbMongo
STOP_STATUS_MAP = {
Status.SUBMITTING: StopOrderStatus.WAITING,
Status.NOTTRADED: StopOrderStatus.WAITING,
Status.PARTTRADED: StopOrderStatus.TRIGGERED,
Status.ALLTRADED: StopOrderStatus.TRIGGERED,
Status.CANCELLED: StopOrderStatus.CANCELLED,
Status.REJECTED: StopOrderStatus.CANCELLED
}
class MultiFactorEngine(BaseEngine):
engine_type = EngineType.LIVE
setting_filename = "multifactor_setting.json"
data_filename = "multifactor_data.json"
setting_dbname = "multifactor_strategy_setting"
data_dbname = "multifactor_strategy_data"
account_id = "mytest"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
super(MultiFactorEngine, self).__init__(
main_engine, event_engine, APP_NAME)
self.strategy_setting = {}
self.strategy_data = {}
self.classes = {}
self.strategies = {}
self.orderid_strategy_map = {}
self.strategy_orderid_map = defaultdict(
set)
self.stop_order_count = 0
self.stop_orders = {}
self.init_thread = None
self.init_queue = Queue()
self.rq_client = None
self.rq_symbols = set()
self.vt_tradeids = set()
self.offset_converter = OffsetConverter(self.main_engine)
self.db_mongo = dbMongo()
self.db_thread = None
self.db_queue = Queue()
self.db_active = False
self.db_count = 0
def init_engine(self):
self.db_start()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
self.write_log("CTA策略引擎初始化成功")
def account_id_change(self, new_id):
self.account_id = new_id
def init_dbmongo(self, name=None, password=None, ip="localhost", port="27017"):
self.db_name = name
self.db_pwd = password
self.db_mongo = dbMongo(name, password, ip, port)
def close(self):
self.stop_all_strategies()
def register_event(self):
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_BAR, self.process_bar_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
def process_tick_event(self, event: Event):
tick = event.data
strategies = self.strategies.values()
if not strategies:
return
self.check_stop_order(tick)
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_bar_event(self, event: Event):
bar = deepcopy(event.data)
strategies = self.strategies.values()
if not strategies:
return
for strategy in strategies:
self.call_strategy_func(strategy, strategy.on_bar, bar)
d = deepcopy(bar.__dict__)
d["account_id"] = self.account_id
d["strategy_name"] = strategy.strategy_name
d["exchange"] = d["exchange"].value
d["interval"] = d["interval"].value
flt = {
"vt_symbol": d["vt_symbol"],
"interval": d["interval"],
"datetime": d["datetime"],
}
self.db_queue.put(["update", self.account_id, "Bar_Data", d, flt])
def process_order_event(self, event: Event):
order = event.data
d = deepcopy(order.__dict__)
self.offset_converter.update_order(order)
strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
if not strategy:
self.write_log("非程序化策略订单:" + str(d))
return
d["account_id"] = self.account_id
d["strategy_name"] = strategy.strategy_name
d["exchange"] = d["exchange"].value
d["type"] = d["type"].value
d["direction"] = d["direction"].value
d["offset"] = d["offset"].value
d["status"] = d["status"].value
flt = {
"vt_orderid": d["vt_orderid"],
"volume": d["volume"],
"status": d["status"],
}
self.db_queue.put(["update", self.account_id, "Order_Data", d, flt])
self.write_log("Order_Data:" + str(d), strategy)
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if order.vt_orderid in vt_orderids and not order.is_active():
vt_orderids.remove(order.vt_orderid)
if order.type == OrderType.STOP:
so = StopOrder(
vt_symbol=order.vt_symbol,
direction=order.direction,
offset=order.offset,
price=order.price,
volume=order.volume,
stop_orderid=order.vt_orderid,
strategy_name=strategy.strategy_name,
status=STOP_STATUS_MAP[order.status],
vt_orderids=[order.vt_orderid],
)
self.call_strategy_func(strategy, strategy.on_stop_order, so)
self.call_strategy_func(strategy, strategy.on_order, order)
def process_trade_event(self, event: Event):
trade = event.data
d = deepcopy(trade.__dict__)
if trade.vt_tradeid in self.vt_tradeids:
return
self.vt_tradeids.add(trade.vt_tradeid)
self.offset_converter.update_trade(trade)
strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
if not strategy:
self.write_log("非程序化策略成交:" + str(d))
return
d["account_id"] = self.account_id
d["strategy_name"] = strategy.strategy_name
d["exchange"] = d["exchange"].value
d["direction"] = d["direction"].value
d["offset"] = d["offset"].value
flt = {
"vt_orderid": d["vt_orderid"],
"vt_tradeid": d["vt_tradeid"],
}
self.db_queue.put(["update", self.account_id, "Trade_Data", d, flt])
self.write_log("Trade_Data:" + str(d), strategy)
if trade.direction == Direction.LONG:
strategy.pos[trade.vt_symbol] += trade.volume
else:
strategy.pos[trade.vt_symbol] -= trade.volume
self.sync_strategy_data(strategy)
self.call_strategy_func(strategy, strategy.on_trade, trade)
self.put_strategy_event(strategy)
def process_position_event(self, event: Event):
position = event.data
d = deepcopy(position.__dict__)
self.offset_converter.update_position(position)
d["account_id"] = self.account_id
d["exchange"] = d["exchange"].value
d["direction"] = d["direction"].value
d["datetime"] = copy(datetime.now().strftime("%Y-%m-%d %H:%M:%S.%fZ"))
if d["volume"] > 0:
self.db_queue.put(["insert", self.account_id, "Position_Data", d])
self.write_log("Position_Data:" + str(d))
def process_account_event(self, event: Event):
account = event.data
d = deepcopy(account.__dict__)
d["account_id"] = self.account_id
d["datetime"] = copy(datetime.now().strftime("%Y-%m-%d %H:%M:%S.%fZ"))
if d["balance"] > 0:
self.db_queue.put(["insert", self.account_id, "Account_Data", d])
self.write_log("Account_Data:" + str(d))
def check_stop_order(self, tick: TickData):
for stop_order in list(self.stop_orders.values()):
if stop_order.vt_symbol != tick.vt_symbol:
continue
if not tick.limit_up and not tick.bid_price_5:
continue
long_triggered = (
stop_order.direction == Direction.LONG and tick.last_price >= stop_order.price
)
short_triggered = (
stop_order.direction == Direction.SHORT and tick.last_price <= stop_order.price
)
if long_triggered or short_triggered:
strategy = self.strategies[stop_order.strategy_name]
if stop_order.direction == Direction.LONG:
if tick.limit_up:
price = tick.limit_up
else:
price = tick.ask_price_5
else:
if tick.limit_down:
price = tick.limit_down
else:
price = tick.bid_price_5
contract = self.main_engine.get_contract(stop_order.vt_symbol)
vt_orderids = self.send_limit_order(
strategy,
contract,
stop_order.direction,
stop_order.offset,
price,
stop_order.volume,
stop_order.lock
)
if vt_orderids:
self.stop_orders.pop(stop_order.stop_orderid)
strategy_vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if stop_order.stop_orderid in strategy_vt_orderids:
strategy_vt_orderids.remove(stop_order.stop_orderid)
stop_order.status = StopOrderStatus.TRIGGERED
stop_order.vt_orderids = vt_orderids
self.call_strategy_func(
strategy, strategy.on_stop_order, stop_order
)
self.put_stop_order_event(stop_order)
def send_server_order(
self,
strategy: MultiFactorTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
type: OrderType,
lock: bool
):
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=type,
price=price,
volume=volume,
)
req_list = self.offset_converter.convert_order_request(original_req, lock)
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
self.orderid_strategy_map[vt_orderid] = strategy
self.strategy_orderid_map[strategy.strategy_name].add(vt_orderid)
return vt_orderids
def send_limit_order(
self,
strategy: MultiFactorTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.LIMIT,
lock
)
def send_server_stop_order(
self,
strategy: MultiFactorTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.STOP,
lock
)
def send_local_stop_order(
self,
strategy: MultiFactorTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
self.stop_order_count += 1
stop_orderid = f"{STOPORDER_PREFIX}.{self.stop_order_count}"
stop_order = StopOrder(
vt_symbol=strategy.vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop_orderid=stop_orderid,
strategy_name=strategy.strategy_name,
lock=lock
)
self.stop_orders[stop_orderid] = stop_order
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
vt_orderids.add(stop_orderid)
self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
self.put_stop_order_event(stop_order)
return stop_orderid
def cancel_server_order(self, strategy: MultiFactorTemplate, vt_orderid: str):
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def cancel_local_stop_order(self, strategy: MultiFactorTemplate, stop_orderid: str):
stop_order = self.stop_orders.get(stop_orderid, None)
if not stop_order:
return
strategy = self.strategies[stop_order.strategy_name]
self.stop_orders.pop(stop_orderid)
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if stop_orderid in vt_orderids:
vt_orderids.remove(stop_orderid)
stop_order.status = StopOrderStatus.CANCELLED
self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
self.put_stop_order_event(stop_order)
def send_order(
self,
strategy: MultiFactorTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool,
lock: bool
):
contract = self.main_engine.get_contract(strategy.vt_symbol)
if not contract:
self.write_log(f"委托失败,找不到合约:{strategy.vt_symbol}", strategy)
return ""
price = round_to(price, contract.pricetick)
volume = round_to(volume, contract.min_volume)
if stop:
if contract.stop_supported:
return self.send_server_stop_order(strategy, contract, direction, offset, price, volume, lock)
else:
return self.send_local_stop_order(strategy, direction, offset, price, volume, lock)
else:
return self.send_limit_order(strategy, contract, direction, offset, price, volume, lock)
def cancel_order(self, strategy: MultiFactorTemplate, vt_orderid: str):
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_local_stop_order(strategy, vt_orderid)
else:
self.cancel_server_order(strategy, vt_orderid)
def cancel_all(self, strategy: MultiFactorTemplate):
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if not vt_orderids:
return
for vt_orderid in copy(vt_orderids):
self.cancel_order(strategy, vt_orderid)
def get_engine_type(self):
return self.engine_type
def load_bar(self, vt_symbol: str, days: int, interval: Interval, callback: Callable[[BarData], None]):
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
bars = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not bars:
bars = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
for bar in bars:
callback(bar)
def load_tick(self, vt_symbol: str, days: int, callback: Callable[[TickData], None]):
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
ticks = database_manager.load_tick_data(
symbol=symbol,
exchange=exchange,
start=start,
end=end,
)
for tick in ticks:
callback(tick)
def call_strategy_func(self, strategy: MultiFactorTemplate, func: Callable, params: Any = None):
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
def add_strategy(self, class_name: str, strategy_name: str, setting: dict):
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes[class_name]
strategy = strategy_class(self, strategy_name, setting)
self.strategies[strategy_name] = strategy
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def init_strategy(self, strategy_name: str):
self.init_queue.put(strategy_name)
if not self.init_thread:
self.init_thread = Thread(target=self._init_strategy)
self.init_thread.start()
def _init_strategy(self):
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
self.write_log(f"{strategy_name}开始执行初始化")
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if value:
setattr(strategy, name, value)
self.call_strategy_func(strategy, strategy.on_init)
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
self.init_thread = None
def start_strategy(self, strategy_name: str):
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
print(f"策略{strategy.strategy_name}启动失败,请先初始化" + str(strategy.__dict__))
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
self.call_strategy_func(strategy, strategy.on_stop)
strategy.trading = False
self.cancel_all(strategy)
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
self.remove_strategy_setting(strategy_name)
if strategy_name in self.strategy_orderid_map:
vt_orderids = self.strategy_orderid_map.pop(strategy_name)
for vt_orderid in vt_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
self.strategies.pop(strategy_name)
return True
def load_strategy_class(self):
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.multifactor_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and
issubclass(value, MultiFactorTemplate) and
value is not MultiFactorTemplate):
self.classes[value.__name__] = value
        except Exception:
            msg = f"Failed to load strategy file {module_name}; exception raised:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
results = self.db_mongo.dbQuery(self.account_id, self.data_dbname, {})
for result in results:
self.strategy_data[result["strategy_name"]] = result["data"]
def sync_strategy_data(self, strategy: MultiFactorTemplate):
data = strategy.get_variables()
data.pop("inited")
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
d = {
"strategy_name": strategy.strategy_name,
"data": data,
}
flt = {"strategy_name": strategy.strategy_name}
self.db_queue.put(["update", self.account_id, self.data_dbname, d, flt, True])
def get_all_strategy_class_names(self):
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def load_strategy_setting(self):
results = self.db_mongo.dbQuery(self.account_id, self.setting_dbname, {})
for result in results:
self.add_strategy(
result["class_name"],
result["strategy_name"],
result["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"setting": setting,
}
d = {
"strategy_name": strategy_name,
"class_name": strategy.__class__.__name__,
"setting": setting,
}
flt = {
"strategy_name": strategy_name,
"class_name": strategy.__class__.__name__,
}
self.db_queue.put(["update", self.account_id, self.setting_dbname, d, flt, True])
def remove_strategy_setting(self, strategy_name: str):
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
flt = {
"strategy_name": strategy_name
}
self.db_mongo.dbDelete(self.account_id, self.setting_dbname, flt)
def put_stop_order_event(self, stop_order: StopOrder):
event = Event(EVENT_MULTIFACTOR_STOPORDER, stop_order)
self.event_engine.put(event)
def put_strategy_event(self, strategy: MultiFactorTemplate):
data = strategy.get_data()
event = Event(EVENT_MULTIFACTOR_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: MultiFactorTemplate = None):
if strategy:
msg = f"{strategy.strategy_name} -> {msg}"
d = {
"datetime": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
"gateway_name": "CtaStrategy",
"msg": msg
}
self.db_queue.put(["insert", self.account_id, "Log", d])
def send_email(self, msg: str, strategy: MultiFactorTemplate = None):
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "CTA策略引擎"
self.main_engine.send_email(subject, msg)
def db_start(self):
self.db_active = True
self.db_thread = Thread(target=self.db_run)
self.db_thread.start()
def db_stop(self):
self.db_active = False
def db_run(self):
while self.db_active:
try:
task = self.db_queue.get(timeout=1)
if task[0] == "update":
dbName = task[1]
collectionName = task[2]
d = task[3]
flt = task[4]
self.db_mongo.dbUpdate(dbName, collectionName, d, flt, True)
elif task[0] == "insert":
dbName = task[1]
collectionName = task[2]
d = task[3]
self.db_mongo.dbInsert(dbName, collectionName, d)
except Empty:
self.db_count += 1
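                # Each empty 1-second poll bumps db_count; after 3600 of them
                # the MongoDB connection is recycled to guard against stale
                # sockets (the counter is reset only after a successful restart).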
if self.db_count >= 3600:
while True:
try:
info = psutil.virtual_memory()
                            self.write_log('Memory in use before restart: ' + str(psutil.Process(os.getpid()).memory_info().rss))
                            self.write_log('Total memory before restart: ' + str(info.total))
                            self.write_log('Memory usage before restart (%): ' + str(info.percent))
                            self.write_log('CPU count before restart: ' + str(psutil.cpu_count()))
                            self.write_log("Database restart starting!!!")
self.db_active = False
self.db_mongo.dbClient.close()
self.db_mongo = None
self.db_mongo = dbMongo(self.db_name, self.db_pwd)
self.db_active = True
self.write_log("数据库重启成功!!!")
self.db_count = 0
break
except Exception as e:
self.write_log("数据库问题" + str(e))
except Exception as e:
self.write_log(str(e))
| true | true |
f7fdc17bacc98f3829b24ae777174bfdfee9092a | 2,313 | py | Python | model-optimizer/extensions/front/image_scaler.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 3 | 2020-02-09T23:25:37.000Z | 2021-01-19T09:44:12.000Z | model-optimizer/extensions/front/image_scaler.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/front/image_scaler.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 2 | 2020-04-18T16:24:39.000Z | 2021-01-19T09:42:19.000Z | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.replacement import FrontReplacementOp
from mo.graph.graph import Graph
from mo.ops.const import Const
from extensions.ops.elementwise import Mul, Add
class ImageScaler(FrontReplacementOp):
op = "ImageScaler"
enabled = True
def replace_sub_graph(self, graph: Graph, match: dict):
        # This replacer rewrites the ImageScaler operation as a Mul -> Add sequence.
        # It also checks that the weights and biases are meaningful.
op = match['op']
# Check that weights and biases are not useless
has_bias, has_weights = True, True
if all([x == 1 for x in np.nditer(op.scale)]):
has_weights = False
if all([x == 0 for x in np.nditer(op.bias)]):
has_bias = False
assert len(op.in_ports()) == 1
last_port = op.in_port(0).get_source()
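        # Track the output port of the most recently created node so that the
        # optional Mul and Add stages can be chained in any combination.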
# Create Mul & Add nodes
if has_weights:
mul_weights = Const(graph, dict(value=op.scale, shape=op.scale.shape)).create_node()
mul_op = Mul(graph, dict(name=op.id + '/mul_')).create_node()
op.in_port(0).get_connection().set_destination(mul_op.in_port(0))
mul_weights.out_port(0).connect(mul_op.in_port(1))
last_port = mul_op.out_port(0)
if has_bias:
add_bias = Const(graph, dict(value=op.bias, shape=op.bias.shape)).create_node()
add_op = Add(graph, dict(name=op.id + '/add_')).create_node()
last_port.get_connection().set_destination(add_op.in_port(0))
add_bias.out_port(0).connect(add_op.in_port(1))
last_port = add_op.out_port(0)
op.in_port(0).disconnect()
op.out_port(0).get_connection().set_source(last_port)
| 37.306452 | 96 | 0.671422 |
import numpy as np
from mo.front.common.replacement import FrontReplacementOp
from mo.graph.graph import Graph
from mo.ops.const import Const
from extensions.ops.elementwise import Mul, Add
class ImageScaler(FrontReplacementOp):
op = "ImageScaler"
enabled = True
def replace_sub_graph(self, graph: Graph, match: dict):
op = match['op']
has_bias, has_weights = True, True
if all([x == 1 for x in np.nditer(op.scale)]):
has_weights = False
if all([x == 0 for x in np.nditer(op.bias)]):
has_bias = False
assert len(op.in_ports()) == 1
last_port = op.in_port(0).get_source()
if has_weights:
mul_weights = Const(graph, dict(value=op.scale, shape=op.scale.shape)).create_node()
mul_op = Mul(graph, dict(name=op.id + '/mul_')).create_node()
op.in_port(0).get_connection().set_destination(mul_op.in_port(0))
mul_weights.out_port(0).connect(mul_op.in_port(1))
last_port = mul_op.out_port(0)
if has_bias:
add_bias = Const(graph, dict(value=op.bias, shape=op.bias.shape)).create_node()
add_op = Add(graph, dict(name=op.id + '/add_')).create_node()
last_port.get_connection().set_destination(add_op.in_port(0))
add_bias.out_port(0).connect(add_op.in_port(1))
last_port = add_op.out_port(0)
op.in_port(0).disconnect()
op.out_port(0).get_connection().set_source(last_port)
| true | true |
f7fdc18c84175d37b66091643e4c1cc0085e38d1 | 160,652 | py | Python | sympy/matrices/matrices.py | darknight009/sympy | 618193720b862a41aa295d474793cc12e4de2927 | [
"BSD-3-Clause"
] | 1 | 2021-07-24T12:45:14.000Z | 2021-07-24T12:45:14.000Z | sympy/matrices/matrices.py | darknight009/sympy | 618193720b862a41aa295d474793cc12e4de2927 | [
"BSD-3-Clause"
] | null | null | null | sympy/matrices/matrices.py | darknight009/sympy | 618193720b862a41aa295d474793cc12e4de2927 | [
"BSD-3-Clause"
] | 1 | 2021-12-31T12:31:28.000Z | 2021-12-31T12:31:28.000Z | from __future__ import print_function, division
import collections
from sympy.core.add import Add
from sympy.core.basic import Basic, Atom
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.logic import fuzzy_and
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy.core.numbers import Integer, ilcm, Float
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key, range, \
NotIterable
from sympy.polys import PurePoly, roots, cancel, gcd
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten, numbered_symbols
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions import exp, factorial
from sympy.printing import sstr
from sympy.core.compatibility import reduce, as_int, string_types
from sympy.assumptions.refine import refine
from sympy.core.decorators import call_highest_priority
from types import FunctionType
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
class DeferredVector(Symbol, NotIterable):
"""A vector whose components are deferred (e.g. for use with lambdify)
Examples
========
>>> from sympy import DeferredVector, lambdify
>>> X = DeferredVector( 'X' )
>>> X
X
>>> expr = (X[0] + 2, X[2] + 3)
>>> func = lambdify( X, expr)
>>> func( [1, 2, 3] )
(3, 6)
"""
def __getitem__(self, i):
if i == -0:
i = 0
if i < 0:
raise IndexError('DeferredVector index out of range')
component_name = '%s[%d]' % (self.name, i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return "DeferredVector('%s')" % (self.name)
class MatrixRequired(object):
"""All subclasses of matrix objects must implement the
required matrix properties listed here."""
rows = None
cols = None
shape = None
_simplify = None
@classmethod
def _new(cls, *args, **kwargs):
"""`_new` must, at minimum, be callable as
        `_new(rows, cols, mat)` where mat is a flat list of the
elements of the matrix."""
raise NotImplementedError("Subclasses must implement this.")
def __eq__(self, other):
raise NotImplementedError("Subclasses must impliment this.")
def __getitem__(self, key):
"""Implementations of __getitem__ should accept ints, in which
case the matrix is indexed as a flat list, tuples (i,j) in which
case the (i,j) entry is returned, slices, or mixed tuples (a,b)
        where a and b are any combination of slices and integers."""
raise NotImplementedError("Subclasses must implement this.")
def __len__(self):
"""The total number of entries in the matrix."""
raise NotImplementedError("Subclasses must implement this.")
class MatrixShaping(MatrixRequired):
"""Provides basic matrix shaping and extracting of submatrices"""
def _eval_col_insert(self, pos, other):
cols = self.cols
def entry(i, j):
if j < pos:
return self[i, j]
elif pos <= j < pos + other.cols:
return other[i, j - pos]
return self[i, j - pos - other.cols]
return self._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_col_join(self, other):
rows = self.rows
def entry(i, j):
if i < rows:
return self[i, j]
return other[i - rows, j]
return classof(self, other)._new(self.rows + other.rows, self.cols,
lambda i, j: entry(i, j))
def _eval_extract(self, rowsList, colsList):
mat = list(self)
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
list(mat[i] for i in indices))
def _eval_get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def _eval_row_insert(self, pos, other):
entries = list(self)
insert_pos = pos * self.cols
entries[insert_pos:insert_pos] = list(other)
return self._new(self.rows + other.rows, self.cols, entries)
def _eval_row_join(self, other):
cols = self.cols
def entry(i, j):
if j < cols:
return self[i, j]
return other[i, j - cols]
return classof(self, other)._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_tolist(self):
return [list(self[i,:]) for i in range(self.rows)]
def _eval_vec(self):
rows = self.rows
def entry(n, _):
# we want to read off the columns first
j = n // rows
i = n - j * rows
return self[i, j]
return self._new(len(self), 1, entry)
def col_insert(self, pos, other):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(other)
if pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != other.rows:
raise ShapeError(
"self and other must have the same number of rows.")
return self._eval_col_insert(pos, other)
def col_join(self, other):
"""Concatenates two matrices along self's last and other's first row
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(other)
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_col_join(other)
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_op
col_swap
col_del
col_join
col_insert
"""
return self[:, j]
def extract(self, rowsList, colsList):
"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
-n <= i < n where n is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
RowsList or colsList can also be a list of booleans, in which case
the rows or columns corresponding to the True values will be selected:
>>> m.extract([0, 1, 2, 3], [True, False, True])
Matrix([
[0, 2],
[3, 5],
[6, 8],
[9, 11]])
"""
if not is_sequence(rowsList) or not is_sequence(colsList):
raise TypeError("rowsList and colsList must be iterable")
# ensure rowsList and colsList are lists of integers
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
# ensure everything is in range
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._eval_extract(rowsList, colsList)
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
return self._eval_get_diag_blocks()
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if self.rows * self.cols != rows * cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self[i * cols + j])
def row_insert(self, pos, other):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
from sympy.matrices import MutableMatrix
# Allows you to build a matrix even if it is null matrix
if not self:
return self._new(other)
if pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_row_insert(pos, other)
def row_join(self, other):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
# Allows you to build a matrix even if it is null matrix
if not self:
return self._new(other)
if self.rows != other.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
return self._eval_row_join(other)
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_op
row_swap
row_del
row_join
row_insert
"""
return self[i, :]
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return self._eval_tolist()
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self._eval_vec()
class MatrixProperties(MatrixRequired):
"""Provides basic properties of a matrix."""
def _eval_atoms(self, *types):
result = set()
for i in self:
result.update(i.atoms(*types))
return result
def _eval_free_symbols(self):
return set().union(*(i.free_symbols for i in self))
def _eval_has(self, *patterns):
return any(a.has(*patterns) for a in self)
def _eval_is_anti_symmetric(self, simpfunc):
if not all(simpfunc(self[i, j] + self[j, i]).is_zero for i in range(self.rows) for j in range(self.cols)):
return False
return True
def _eval_is_diagonal(self):
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
def _eval_is_hermetian(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i].conjugate()))
return mat.is_zero
def _eval_is_Identity(self):
def dirac(i, j):
if i == j:
return 1
return 0
return all(self[i, j] == dirac(i, j) for i in range(self.rows) for j in
range(self.cols))
def _eval_is_lower_hessenberg(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def _eval_is_lower(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
def _eval_is_symbolic(self):
return self.has(Symbol)
def _eval_is_symmetric(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i]))
return mat.is_zero
def _eval_is_zero(self):
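        # Three-valued result: False if any entry is definitely nonzero,
        # None if some entry is undecidable (e.g. symbolic), True otherwise.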
        if any(i.is_zero is False for i in self):
            return False
        if any(i.is_zero is None for i in self):
            return None
return True
def _eval_is_upper_hessenberg(self):
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(i - 1))
def _eval_values(self):
return [i for i in self if not i.is_zero]
def atoms(self, *types):
"""Returns the atoms that form the current object.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import Matrix
>>> Matrix([[x]])
Matrix([[x]])
>>> _.atoms()
{x}
"""
types = tuple(t if isinstance(t, type) else type(t) for t in types)
if not types:
types = (Atom,)
return self._eval_atoms(*types)
@property
def free_symbols(self):
"""Returns the free symbols within the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
{x}
"""
return self._eval_free_symbols()
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import Matrix, SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> B = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
>>> B.has(x)
True
>>> B.has(y)
False
>>> B.has(Float)
True
"""
return self._eval_has(*patterns)
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2 , 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_anti_symmetric(simpfunc)
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
is_diagonalizable
diagonalize
"""
return self._eval_is_diagonal()
@property
def is_hermitian(self, simplify=True):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = Matrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
if not self.is_square:
return False
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
return self._eval_is_hermetian(simpfunc)
@property
def is_Identity(self):
if not self.is_square:
return False
return self._eval_is_Identity()
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
The lower hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return self._eval_is_lower_hessenberg()
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4 , 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return self._eval_is_lower()
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return self._eval_is_symbolic()
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed-up is_symmetric()
test by using 'simplify=False'.
>>> bool(m.is_symmetric(simplify=False))
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_symmetric(simpfunc)
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return self._eval_is_upper_hessenberg()
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
>>> m = Matrix(4, 3, [5, 1, 9, 0, 4 , 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(i))
@property
def is_zero(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g.
contains a symbol), this will be None
Examples
========
>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero
True
>>> b.is_zero
True
>>> c.is_zero
False
>>> d.is_zero
True
>>> e.is_zero
"""
return self._eval_is_zero()
def values(self):
"""Return non-zero values of self."""
return self._eval_values()
class MatrixOperations(MatrixRequired):
"""Provides basic matrix shape and elementwise
operations. Should not be instantiated directly."""
def _eval_adjoint(self):
return self.transpose().conjugate()
def _eval_conjugate(self):
return self.applyfunc(lambda x: x.conjugate())
def _eval_trace(self):
return sum(self[i, i] for i in range(self.rows))
def _eval_transpose(self):
return self._new(self.cols, self.rows, lambda i, j: self[j, i])
def adjoint(self):
"""Conjugate transpose or Hermitian conjugation."""
return self._eval_adjoint()
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self._new(self.rows, self.cols, [f(x) for x in self])
return out
def conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
return self._eval_conjugate()
def doit(self, **kwargs):
return self.applyfunc(lambda x: x.doit())
def evalf(self, prec=None, **options):
"""Apply evalf() to each element of self."""
return self.applyfunc(lambda i: i.evalf(prec, **options))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
@property
def H(self):
"""Return Hermite conjugate.
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[ 0],
[1 + I],
[ 2],
[ 3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])
See Also
========
conjugate: By-element conjugation
D: Dirac conjugation
"""
return self.T.C
def refine(self, assumptions=True):
"""Apply refine to each element of the matrix.
Examples
========
>>> from sympy import Symbol, Matrix, Abs, sqrt, Q
>>> x = Symbol('x')
>>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]])
Matrix([
[ Abs(x)**2, sqrt(x**2)],
[sqrt(x**2), Abs(x)**2]])
>>> _.refine(Q.real(x))
Matrix([
[ x**2, Abs(x)],
[Abs(x), x**2]])
"""
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
return self.applyfunc(lambda x: x.replace(F, G, map))
def simplify(self, ratio=1.7, measure=count_ops):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(ratio, measure))
def subs(self, *args, **kwargs): # should mirror core.basic.subs
"""Return a new matrix with subs applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def trace(self):
"""
Returns the trace of a square matrix i.e. the sum of the
diagonal elements.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.trace()
5
"""
if not self.rows == self.cols:
raise NonSquareMatrixError()
return self._eval_trace()
def transpose(self):
"""
Returns the transpose of the matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.transpose()
Matrix([
[1, 3],
[2, 4]])
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
return self._eval_transpose()
T = property(transpose, None, None, "Matrix transposition.")
C = property(conjugate, None, None, "By-element conjugation.")
n = evalf
def xreplace(self, rule): # should mirror core.basic.xreplace
"""Return a new matrix with xreplace applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.xreplace({x: y})
Matrix([[y]])
>>> Matrix(_).xreplace({y: x})
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.xreplace(rule))
_eval_simplify = simplify
class MatrixBase(MatrixOperations, MatrixProperties, MatrixShaping):
# Added just for numpy compatibility
__array_priority__ = 11
is_Matrix = True
is_Identity = None
_class_priority = 3
_sympify = staticmethod(sympify)
__hash__ = None # Mutable
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes don't match."""
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.shape != B.shape:
raise ShapeError("Matrix size mismatch: %s + %s" % (
A.shape, B.shape))
alst = A.tolist()
blst = B.tolist()
ret = [S.Zero] * A.rows
for i in range(A.shape[0]):
ret[i] = [j + k for j, k in zip(alst[i], blst[i])]
rv = classof(A, B)._new(ret)
if 0 in A.shape:
rv = rv.reshape(*A.shape)
return rv
raise TypeError('cannot add matrix and %s' % type(other))
def __array__(self):
from .dense import matrix2numpy
return matrix2numpy(self)
def __div__(self, other):
return self * (S.One / other)
def __getattr__(self, attr):
if attr in ('diff', 'integrate', 'limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc(item_doit)
return doit
else:
raise AttributeError(
"%s has no attribute %s." % (self.__class__.__name__, attr))
def __len__(self):
"""Return the number of elements of self.
Implemented mainly so bool(Matrix()) == False.
"""
return self.rows * self.cols
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i, j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.cols != B.rows:
raise ShapeError("Matrix size mismatch: %s * %s." % (
A.shape, B.shape))
if A.cols == 0:
return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
try:
blst = B.T.tolist()
except AttributeError:
# If B is a MatrixSymbol, B.T.tolist does not exist
return NotImplemented
alst = A.tolist()
return classof(A, B)._new(A.rows, B.cols, lambda i, j:
reduce(lambda k, l: k + l,
[a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
else:
return self._new(self.rows, self.cols,
[i * other for i in self._mat])
def __neg__(self):
return -1 * self
def _matrix_pow_by_jordan_blocks(self, num):
from sympy.matrices import diag, MutableMatrix
from sympy import binomial
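        # A**n = P * J**n * P**-1, where J is the Jordan form of A; each
        # Jordan block of J can be raised to the power n in closed form.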
def jordan_cell_power(jc, n):
N = jc.shape[0]
l = jc[0, 0]
if l == 0 and (n < N - 1) != False:
raise ValueError("Matrix det == 0; not invertible")
elif l == 0 and N > 1 and n % 1 != 0:
raise ValueError("Non-integer power cannot be evaluated")
for i in range(N):
for j in range(N-i):
bn = binomial(n, i)
if isinstance(bn, binomial):
bn = bn._eval_expand_func()
jc[j, i+j] = l**(n-i)*bn
P, jordan_cells = self.jordan_cells()
# Make sure jordan_cells matrices are mutable:
jordan_cells = [MutableMatrix(j) for j in jordan_cells]
for j in jordan_cells:
jordan_cell_power(j, num)
return self._new(P*diag(*jordan_cells)*P.inv())
def _matrix_pow_by_recursion(self, num):
from sympy.matrices import eye
n = int(num)
if n < 0:
return self.inv()**-n # A**-2 = (A**-1)**2
a = eye(self.cols)
s = self
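        # Square-and-multiply: consume the exponent bit by bit, multiplying
        # the accumulator `a` by the current power `s` whenever the low bit is
        # set, using O(log n) matrix multiplications.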
while n:
if n % 2:
a *= s
n -= 1
if not n:
break
s *= s
n //= 2
return self._new(a)
def __pow__(self, num):
if not self.is_square:
raise NonSquareMatrixError()
num = sympify(num)
        # Conditions to include Integers and Integer-valued Floats (e.g. 10.0, 26.0, etc.).
# n = int(num) in _matrix_pow_by_recursion() can only work correctly
# with integer valued numbers.
# Other floats are thus handled by _matrix_pow_by_jordan_blocks().
if num.is_Number and num % 1 == 0:
if (self.rows == 1):
return self._new([[self[0]**num]])
# When certain conditions are met,
# Jordan block algorithm is faster than
# computation by recursion.
elif self.rows == 2 and num > 100000:
try:
return self._matrix_pow_by_jordan_blocks(num)
except ValueError:
pass
return self._matrix_pow_by_recursion(num)
elif isinstance(num, Expr):
return self._matrix_pow_by_jordan_blocks(num)
else:
raise TypeError(
"Only SymPy expressions or integers are supported as exponent for matrices")
def __radd__(self, other):
return self + other
def __repr__(self):
return sstr(self)
def __rmul__(self, a):
if getattr(a, 'is_Matrix', False):
return self._new(a) * self
return self._new(self.rows, self.cols, [a * i for i in self._mat])
def __rsub__(self, a):
return (-self) + a
def __str__(self):
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix(%s)" % str(self.tolist())
def __sub__(self, a):
return self + (-a)
def __truediv__(self, other):
return self.__div__(other)
def _diagonalize_clear_subproducts(self):
del self._is_symbolic
del self._is_symmetric
del self._eigenvects
def _format_str(self, printer=None):
if not printer:
from sympy.printing.str import StrPrinter
printer = StrPrinter()
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
if self.rows == 1:
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
"""Return the number of rows, cols and flat matrix elements.
Examples
========
>>> from sympy import Matrix, I
Matrix can be constructed as follows:
* from a nested list of iterables
>>> Matrix( ((1, 2+I), (3, 4)) )
Matrix([
[1, 2 + I],
[3, 4]])
* from un-nested iterable (interpreted as a column)
>>> Matrix( [1, 2] )
Matrix([
[1],
[2]])
* from un-nested iterable with dimensions
>>> Matrix(1, 2, [1, 2] )
Matrix([[1, 2]])
* from no arguments (a 0 x 0 matrix)
>>> Matrix()
Matrix(0, 0, [])
* from a rule
>>> Matrix(2, 2, lambda i, j: i/(j + 1) )
Matrix([
[0, 0],
[1, 1/2]])
"""
from sympy.matrices.sparse import SparseMatrix
flat_list = None
if len(args) == 1:
# Matrix(SparseMatrix(...))
if isinstance(args[0], SparseMatrix):
return args[0].rows, args[0].cols, flatten(args[0].tolist())
# Matrix(Matrix(...))
elif isinstance(args[0], MatrixBase):
return args[0].rows, args[0].cols, args[0]._mat
# Matrix(MatrixSymbol('X', 2, 2))
elif isinstance(args[0], Basic) and args[0].is_Matrix:
return args[0].rows, args[0].cols, args[0].as_explicit()._mat
# Matrix(numpy.ones((2, 2)))
elif hasattr(args[0], "__array__"):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = args[0].__array__()
if len(arr.shape) == 2:
rows, cols = arr.shape[0], arr.shape[1]
flat_list = [cls._sympify(i) for i in arr.ravel()]
return rows, cols, flat_list
elif len(arr.shape) == 1:
rows, cols = arr.shape[0], 1
flat_list = [S.Zero] * rows
for i in range(len(arr)):
flat_list[i] = cls._sympify(arr[i])
return rows, cols, flat_list
else:
raise NotImplementedError(
"SymPy supports just 1D and 2D matrices")
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
elif is_sequence(args[0]) \
and not isinstance(args[0], DeferredVector):
in_mat = []
ncol = set()
for row in args[0]:
if isinstance(row, MatrixBase):
in_mat.extend(row.tolist())
if row.cols or row.rows: # only pay attention if it's not 0x0
ncol.add(row.cols)
else:
in_mat.append(row)
try:
ncol.add(len(row))
except TypeError:
ncol.add(1)
if len(ncol) > 1:
raise ValueError("Got rows of variable lengths: %s" %
sorted(list(ncol)))
cols = ncol.pop() if ncol else 0
rows = len(in_mat) if cols else 0
if rows:
if not is_sequence(in_mat[0]):
cols = 1
flat_list = [cls._sympify(i) for i in in_mat]
return rows, cols, flat_list
flat_list = []
for j in range(rows):
for i in range(cols):
flat_list.append(cls._sympify(in_mat[j][i]))
elif len(args) == 3:
rows = as_int(args[0])
cols = as_int(args[1])
# Matrix(2, 2, lambda i, j: i+j)
if len(args) == 3 and isinstance(args[2], collections.Callable):
op = args[2]
flat_list = []
for i in range(rows):
flat_list.extend(
[cls._sympify(op(cls._sympify(i), cls._sympify(j)))
for j in range(cols)])
# Matrix(2, 2, [1, 2, 3, 4])
elif len(args) == 3 and is_sequence(args[2]):
flat_list = args[2]
if len(flat_list) != rows * cols:
raise ValueError(
'List length should be equal to rows*columns')
flat_list = [cls._sympify(i) for i in flat_list]
# Matrix()
elif len(args) == 0:
# Empty Matrix
rows = cols = 0
flat_list = []
if flat_list is None:
raise TypeError("Data type not understood")
return rows, cols, flat_list
def _jordan_block_structure(self):
# To every eigenvalue may belong `i` blocks with size s(i)
# and a chain of generalized eigenvectors
# which will be determined by the following computations:
# for every eigenvalue we will add a dictionary
# containing, for all blocks, the blocksizes and the attached chain vectors
# that will eventually be used to form the transformation P
jordan_block_structures = {}
_eigenvects = self.eigenvects()
ev = self.eigenvals()
if len(ev) == 0:
raise AttributeError("could not compute the eigenvalues")
for eigenval, multiplicity, vects in _eigenvects:
l_jordan_chains = {}
geometrical = len(vects)
if geometrical == multiplicity:
                # The Jordan chains all have length 1 and consist of only one vector
# which is the eigenvector of course
chains = []
for v in vects:
chain = [v]
chains.append(chain)
l_jordan_chains[1] = chains
jordan_block_structures[eigenval] = l_jordan_chains
elif geometrical == 0:
raise MatrixError(
"Matrix has the eigen vector with geometrical multiplicity equal zero.")
else:
# Up to now we know nothing about the sizes of the blocks of our Jordan matrix.
# Note that knowledge of algebraic and geometrical multiplicity
# will *NOT* be sufficient to determine this structure.
# The blocksize `s` could be defined as the minimal `k` where
# `kernel(self-lI)^k = kernel(self-lI)^(k+1)`
# The extreme case would be that k = (multiplicity-geometrical+1)
# but the blocks could be smaller.
# Consider for instance the following matrix
# [2 1 0 0]
# [0 2 1 0]
# [0 0 2 0]
# [0 0 0 2]
# which coincides with it own Jordan canonical form.
# It has only one eigenvalue l=2 of (algebraic) multiplicity=4.
# It has two eigenvectors, one belonging to the last row (blocksize 1)
# and one being the last part of a jordan chain of length 3 (blocksize of the first block).
                # Note again that it is not possible to obtain this from the algebraic and geometrical
# multiplicity alone. This only gives us an upper limit for the dimension of one of
# the subspaces (blocksize of according jordan block) given by
# max=(multiplicity-geometrical+1) which is reached for our matrix
# but not for
# [2 1 0 0]
# [0 2 0 0]
# [0 0 2 1]
# [0 0 0 2]
# although multiplicity=4 and geometrical=2 are the same for this matrix.
from sympy.matrices import MutableMatrix
I = MutableMatrix.eye(self.rows)
l = eigenval
M = (self - l * I)
# We will store the matrices `(self-l*I)^k` for further computations
                # for convenience only we store `Ms[0]=(self-lI)^0=I`
# so the index is the same as the power for all further Ms entries
# We also store the vectors that span these kernels (Ns[0] = [])
# and also their dimensions `a_s`
# this is mainly done for debugging since the number of blocks of a given size
# can be computed from the a_s, in order to check our result which is obtained simpler
# by counting the number of Jordan chains for `a` given `s`
# `a_0` is `dim(Kernel(Ms[0]) = dim (Kernel(I)) = 0` since `I` is regular
l_jordan_chains = {}
Ms = [I]
Ns = [[]]
a = [0]
smax = 0
M_new = Ms[-1] * M
Ns_new = M_new.nullspace()
a_new = len(Ns_new)
Ms.append(M_new)
Ns.append(Ns_new)
                # As long as the nullspaces keep growing, compute further powers.
                while a_new > a[-1]:
a.append(a_new)
M_new = Ms[-1] * M
Ns_new = M_new.nullspace()
a_new = len(Ns_new)
Ms.append(M_new)
Ns.append(Ns_new)
smax += 1
# We now have `Ms[-1]=((self-l*I)**s)=Z=0`.
# We also know the size of the biggest Jordan block
# associated with `l` to be `s`.
# Now let us proceed with the computation of the associate part of the transformation matrix `P`.
# We already know the kernel (=nullspace) `K_l` of (self-lI) which consists of the
# eigenvectors belonging to eigenvalue `l`.
# The dimension of this space is the geometric multiplicity of eigenvalue `l`.
# For every eigenvector ev out of `K_l`, there exists a subspace that is
# spanned by the Jordan chain of ev. The dimension of this subspace is
# represented by the length `s` of the Jordan block.
# The chain itself is given by `{e_0,..,e_s-1}` where:
# `e_k+1 =(self-lI)e_k (*)`
# and
# `e_s-1=ev`
# So it would be possible to start with the already known `ev` and work backwards until one
# reaches `e_0`. Unfortunately this can not be done by simply solving system (*) since its matrix
# is singular (by definition of the eigenspaces).
                # This approach would force us to choose, at every step, the degree of freedom left undetermined
# by (*). This is difficult to implement with computer algebra systems and also quite inefficient.
# We therefore reformulate the problem in terms of nullspaces.
# To do so we start from the other end and choose `e0`'s out of
# `E=Kernel(self-lI)^s / Kernel(self-lI)^(s-1)`
# Note that `Kernel(self-lI)^s = Kernel(Z) = V` (the whole vector space).
# So in the first step `s=smax` this restriction turns out to actually restrict nothing at all
# and the only remaining condition is to choose vectors in `Kernel(self-lI)^(s-1)`.
# Subsequently we compute `e_1=(self-lI)e_0`, `e_2=(self-lI)*e_1` and so on.
# The subspace `E` can have a dimension larger than one.
# That means that we have more than one Jordan block of size `s` for the eigenvalue `l`
# and as many Jordan chains (this is the case in the second example).
# In this case we start as many Jordan chains and have as many blocks of size `s` in the jcf.
# We now have all the Jordan blocks of size `s` but there might be others attached to the same
# eigenvalue that are smaller.
# So we will do the same procedure also for `s-1` and so on until 1 (the lowest possible order
# where the Jordan chain is of length 1 and just represented by the eigenvector).
for s in reversed(range(1, smax + 1)):
S = Ms[s]
# We want the vectors in `Kernel((self-lI)^s)`,
# but without those in `Kernel(self-lI)^s-1`
# so we will add their adjoints as additional equations
# to the system formed by `S` to get the orthogonal
# complement.
# (`S` will no longer be quadratic.)
exclude_vectors = Ns[s - 1]
for k in range(0, a[s - 1]):
S = S.col_join((exclude_vectors[k]).adjoint())
# We also want to exclude the vectors
# in the chains for the bigger blocks
# that we have already computed (if there are any).
# (That is why we start with the biggest s).
# Since Jordan blocks are not orthogonal in general
# (in the original space), only those chain vectors
# that are on level s (index `s-1` in a chain)
# are added.
for chain_list in l_jordan_chains.values():
for chain in chain_list:
S = S.col_join(chain[s - 1].adjoint())
e0s = S.nullspace()
# Determine the number of chain leaders
# for blocks of size `s`.
n_e0 = len(e0s)
s_chains = []
# s_cells=[]
for i in range(0, n_e0):
chain = [e0s[i]]
for k in range(1, s):
v = M * chain[k - 1]
chain.append(v)
                    # We want the chain leader to appear as the last vector of the block.
chain.reverse()
s_chains.append(chain)
l_jordan_chains[s] = s_chains
jordan_block_structures[eigenval] = l_jordan_chains
return jordan_block_structures
def _jordan_split(self, algebraical, geometrical):
"""Return a list of integers with sum equal to 'algebraical'
and length equal to 'geometrical'"""
n1 = algebraical // geometrical
res = [n1] * geometrical
res[len(res) - 1] += algebraical % geometrical
assert sum(res) == algebraical
return res
def _setitem(self, key, value):
"""Helper to set value at location given by key.
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
from .dense import Matrix
is_slice = isinstance(key, slice)
i, j = key = self.key2ij(key)
is_mat = isinstance(value, MatrixBase)
if type(i) is slice or type(j) is slice:
if is_mat:
self.copyin_matrix(key, value)
return
if not isinstance(value, Expr) and is_sequence(value):
self.copyin_list(key, value)
return
raise ValueError('unexpected value: %s' % value)
else:
if (not is_mat and
not isinstance(value, Basic) and is_sequence(value)):
value = Matrix(value)
is_mat = True
if is_mat:
if is_slice:
key = (slice(*divmod(i, self.cols)),
slice(*divmod(j, self.cols)))
else:
key = (slice(i, i + value.rows),
slice(j, j + value.cols))
self.copyin_matrix(key, value)
else:
return i, j, self._sympify(value)
return
def add(self, b):
"""Return self + b """
return self + b
def adjugate(self, method="berkowitz"):
"""Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See Also
========
cofactorMatrix
transpose
berkowitz
"""
return self.cofactorMatrix(method).T
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
"""Computes characteristic polynomial minors using Berkowitz method.
A PurePoly is returned so using different variables for ``x`` does
not affect the comparison or the polynomials:
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> A = Matrix([[1, 3], [2, 0]])
>>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
True
Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
default (which looks good when pretty-printed in unicode):
>>> A.berkowitz_charpoly().as_expr()
_lambda**2 - _lambda - 6
No test is done to see that ``x`` doesn't clash with an existing
symbol, so using the default (``lambda``) or your own Dummy symbol is
the safest option:
>>> A = Matrix([[1, 2], [x, 0]])
>>> A.charpoly().as_expr()
_lambda**2 - _lambda - 2*x
>>> A.charpoly(x).as_expr()
x**2 - 3*x
See Also
========
berkowitz
"""
return PurePoly(list(map(simplify, self.berkowitz()[-1])), x)
def berkowitz_det(self):
"""Computes determinant using Berkowitz method.
See Also
========
det
berkowitz
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
poly = self.berkowitz()[-1]
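        # The Berkowitz charpoly is det(t*I - M); its constant term equals
        # (-1)**n * det(M), so the sign is restored from the degree n.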
sign = (-1) ** (len(poly) - 1)
return sign * poly[-1]
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========
berkowitz
"""
return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method.
See Also
========
berkowitz
"""
sign, minors = S.One, []
for poly in self.berkowitz():
minors.append(sign * poly[-1])
sign = -sign
return tuple(minors)
def berkowitz(self):
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently
coefficients of characteristic polynomials of 'self' and all
its square sub-matrices composed by removing both i-th row
and column, without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial, when 'self'
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and
I is the N x N identity matrix, then the following definition
of the characteristic polynomial is used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])
>>> p, q, r, s = M.berkowitz()
>>> p # 0 x 0 M's sub-matrix
(1,)
>>> q # 1 x 1 M's sub-matrix
(1, -x)
>>> r # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> s # 3 x 3 M's sub-matrix
(1, -2*x, x**2 - y*z - y, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
See Also
========
berkowitz_det
berkowitz_minors
berkowitz_charpoly
berkowitz_eigenvals
"""
from sympy.matrices import zeros
berk = ((1,),)
if not self:
return berk
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0] * (N - 1)
for n in range(N, 1, -1):
T, k = zeros(n + 1, n), n - 1
R, C = -A[k, :k], A[:k, k]
A, a = A[:k, :k], -A[k, k]
items = [C]
for i in range(0, n - 2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0, 0]
items = [S.One, a] + items
for i in range(n):
T[i:, i] = items[:n - i + 1]
transforms[k - 1] = T
polys = [self._new([S.One, -A[0, 0]])]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return berk + tuple(map(tuple, polys))
def cholesky_solve(self, rhs):
"""Solves Ax = B using Cholesky decomposition,
for a general square non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if self.is_symmetric():
L = self._cholesky()
elif self.rows >= self.cols:
L = (self.T * self)._cholesky()
rhs = self.T * rhs
else:
raise NotImplementedError('Under-determined System. '
'Try M.gauss_jordan_solve(rhs)')
Y = L._lower_triangular_solve(rhs)
return (L.T)._upper_triangular_solve(Y)
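# Illustrative sketch (added; not part of the original source), reusing
# the symmetric positive-definite matrix from the cholesky() docstring
# below; the right-hand side is A * [1, 1, 1]:
#   >>> from sympy import Matrix
#   >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
#   >>> A.cholesky_solve(Matrix([35, 33, 6]))
#   Matrix([
#   [1],
#   [1],
#   [1]])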
def cholesky(self):
"""Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
See Also
========
LDLdecomposition
LUdecomposition
QRdecomposition
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._cholesky()
def cofactor(self, i, j, method="berkowitz"):
"""Calculate the cofactor of an element.
See Also
========
cofactorMatrix
minorEntry
minorMatrix
"""
if (i + j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1 * self.minorEntry(i, j, method)
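# Illustrative sketch (added; not part of the original source): the
# cofactor is the signed minor, C(i, j) = (-1)**(i + j) * minor(i, j).
#   >>> from sympy import Matrix
#   >>> A = Matrix([[1, 2], [3, 4]])
#   >>> A.cofactor(0, 1)   # (-1)**1 * det(Matrix([[3]])) == -3
#   -3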
def cofactorMatrix(self, method="berkowitz"):
"""Return a matrix containing the cofactor of each element.
See Also
========
cofactor
minorEntry
minorMatrix
adjugate
"""
out = self._new(self.rows, self.cols, lambda i, j:
self.cofactor(i, j, method))
return out
def columnspace(self, simplify=False):
"""Returns list of vectors (Matrix objects) that span columnspace of self
Examples
========
>>> from sympy.matrices import Matrix
>>> m = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> m
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> m.columnspace()
[Matrix([
[ 1],
[-2],
[ 3]]), Matrix([
[0],
[0],
[6]])]
See Also
========
nullspace
"""
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
# create a set of vectors for the basis
for i in range(self.cols):
if i in pivots:
basis.append(self.col(i))
return [self._new(b) for b in basis]
def condition_number(self):
"""Returns the condition number of a matrix.
This is the maximum singular value divided by the minimum singular value.
Examples
========
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========
singular_values
"""
if not self:
return S.Zero
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
    def as_real_imag(self):
        """Returns a tuple containing the (real, imaginary) part of matrix."""
        from sympy.functions.elementary.complexes import im, re
        # The original body called self.as_real_imag(), which recurses
        # forever; apply re() and im() elementwise instead (a minimal fix).
        return (self.applyfunc(re), self.applyfunc(im))
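# Illustrative sketch (added; not part of the original source):
#   >>> from sympy import Matrix, I
#   >>> Matrix([[1 + I, 2]]).as_real_imag()
#   (Matrix([[1, 2]]), Matrix([[1, 0]]))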
def copy(self):
"""
Returns the copy of a matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.copy()
Matrix([
[1, 2],
[3, 4]])
"""
return self._new(self.rows, self.cols, self._mat)
def cross(self, b):
"""Return the cross product of `self` and `b` relaxing the condition
of compatible dimensions: if each has 3 elements, a matrix of the
same type and shape as `self` will be returned. If `b` has the same
shape as `self` then common identities for the cross product (like
`a x b = - b x a`) will hold.
See Also
========
dot
multiply
multiply_elementwise
"""
if not is_sequence(b):
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if not (self.rows * self.cols == b.rows * b.cols == 3):
raise ShapeError("Dimensions incorrect for cross product: %s x %s" %
((self.rows, self.cols), (b.rows, b.cols)))
else:
return self._new(self.rows, self.cols, (
(self[1] * b[2] - self[2] * b[1]),
(self[2] * b[0] - self[0] * b[2]),
(self[0] * b[1] - self[1] * b[0])))
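# Illustrative sketch (added; not part of the original source): the
# standard basis vectors behave as expected, e1 x e2 == e3.
#   >>> from sympy import Matrix
#   >>> Matrix([1, 0, 0]).cross(Matrix([0, 1, 0]))
#   Matrix([
#   [0],
#   [0],
#   [1]])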
@property
def D(self):
"""Return Dirac conjugate (if self.rows == 4).
Examples
========
>>> from sympy import Matrix, I, eye
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m.D
Matrix([[0, 1 - I, -2, -3]])
>>> m = (eye(4) + I*eye(4))
>>> m[0, 3] = 2
>>> m.D
Matrix([
[1 - I, 0, 0, 0],
[ 0, 1 - I, 0, 0],
[ 0, 0, -1 + I, 0],
[ 2, 0, 0, -1 + I]])
If the matrix does not have 4 rows an AttributeError will be raised
because this property is only defined for matrices with 4 rows.
>>> Matrix(eye(2)).D
Traceback (most recent call last):
...
AttributeError: Matrix has no attribute D.
See Also
========
conjugate: By-element conjugation
H: Hermite conjugation
"""
from sympy.physics.matrices import mgamma
if self.rows != 4:
# In Python 3.2, properties can only return an AttributeError
# so we can't raise a ShapeError -- see commit which added the
# first line of this inline comment. Also, there is no need
# for a message since MatrixBase will raise the AttributeError
raise AttributeError
return self.H * mgamma(0)
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well-known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy().as_mutable(), self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0] * M[1, 1] - M[0, 1] * M[1, 0]
elif n == 3:
det = (
M[0, 0] * M[1, 1] * M[2, 2] + M[0, 1] * M[1, 2] * M[2, 0] + M[
0, 2] * M[1, 0] * M[2, 1]) - \
(
M[0, 2] * M[1, 1] * M[2, 0] + M[0, 0] * M[1, 2] * M[2, 1] + M[
0, 1] * M[1, 0] * M[2, 2])
else:
sign = 1 # track current sign in case of column swap
for k in range(n - 1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k + 1, n):
if M[i, k]:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k + 1, n):
for j in range(k + 1, n):
D = M[k, k] * M[i, j] - M[i, k] * M[k, j]
if k > 0:
D /= M[k - 1, k - 1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign * M[n - 1, n - 1]
return det.expand()
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
det_bareis
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
p, prod = [], 1
l, u, p = M.LUdecomposition()
if len(p) % 2:
prod = -1
for k in range(n):
prod = prod * u[k, k] * l[k, k]
return prod.expand()
def det(self, method="bareis"):
"""Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
det_LU ... det_LU_decomposition
See Also
========
det_bareis
berkowitz_det
det_LU
"""
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
elif method == "det_LU":
return self.det_LU_decomposition()
else:
raise ValueError("Determinant method '%s' unrecognized" % method)
def diagonal_solve(self, rhs):
"""Solves Ax = B efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not self.is_diagonal:
raise TypeError("Matrix should be diagonal")
if rhs.rows != self.rows:
raise TypeError("Size mis-match")
return self._diagonal_solve(rhs)
def diagonalize(self, reals_only=False, sort=False, normalize=False):
"""
Return (P, D), where D is diagonal and
D = P^-1 * M * P
where M is current matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> (P, D) = m.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1, 2]])
>>> P.inv() * m * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========
is_diagonal
is_diagonalizable
"""
from sympy.matrices import diag
if not self.is_square:
raise NonSquareMatrixError()
if not self.is_diagonalizable(reals_only, False):
self._diagonalize_clear_subproducts()
raise MatrixError("Matrix is not diagonalizable")
else:
if self._eigenvects is None:
self._eigenvects = self.eigenvects(simplify=True)
if sort:
self._eigenvects.sort(key=default_sort_key)
self._eigenvects.reverse()
diagvals = []
P = self._new(self.rows, 0, [])
for eigenval, multiplicity, vects in self._eigenvects:
for k in range(multiplicity):
diagvals.append(eigenval)
vec = vects[k]
if normalize:
vec = vec / vec.norm()
P = P.col_insert(P.cols, vec)
D = diag(*diagvals)
self._diagonalize_clear_subproducts()
return (P, D)
def diff(self, *args):
"""Calculate the derivative of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])
See Also
========
integrate
limit
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].diff(*args))
def dot(self, b):
"""Return the dot product of Matrix self and b relaxing the condition
of compatible dimensions: if either the number of rows or columns are
the same as the length of b then the dot product is returned. If self
is a row or column vector, a scalar is returned. Otherwise, a list
of results is returned (and in that case the number of columns in self
must match the length of b).
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> v = [1, 1, 1]
>>> M.row(0).dot(v)
6
>>> M.col(0).dot(v)
12
>>> M.dot(v)
[6, 15, 24]
See Also
========
cross
multiply
multiply_elementwise
"""
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError(
"Dimensions incorrect for dot product: %s, %s" % (
self.shape, len(b)))
return self.dot(Matrix(b))
else:
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
if mat.cols == b.rows:
if b.cols != 1:
mat = mat.T
b = b.T
prod = flatten((mat * b).tolist())
if len(prod) == 1:
return prod[0]
return prod
if mat.cols == b.cols:
return mat.dot(b.T)
elif mat.rows == b.rows:
return mat.T.dot(b)
else:
raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (
self.shape, b.shape))
def dual(self):
"""Returns the dual of a matrix, which is:
`(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`
Since the levicivita method is anti_symmetric for any pairwise
exchange of indices, the dual of a symmetric matrix is the zero
matrix. Strictly speaking the dual defined here assumes that the
'matrix' `M` is a contravariant anti_symmetric second rank tensor,
so that the dual is a covariant second rank tensor.
"""
from sympy import LeviCivita
from sympy.matrices import zeros
M, n = self[:, :], self.rows
work = zeros(n)
if self.is_symmetric():
return work
for i in range(1, n):
for j in range(1, n):
acum = 0
for k in range(1, n):
acum += LeviCivita(i, j, 0, k) * M[0, k]
work[i, j] = acum
work[j, i] = -acum
for l in range(1, n):
acum = 0
for a in range(1, n):
for b in range(1, n):
acum += LeviCivita(0, l, a, b) * M[a, b]
acum /= 2
work[0, l] = -acum
work[l, 0] = acum
return work
def eigenvals(self, **flags):
"""Return eigen values using the berkowitz_eigenvals routine.
Since the roots routine doesn't always work well with Floats,
they will be replaced with Rationals before calling that
routine. If this is not desired, set flag ``rational`` to False.
"""
# roots doesn't like Floats, so replace them with Rationals
# unless the nsimplify flag indicates that this has already
# been done, e.g. in eigenvects
mat = self
if not mat:
return {}
if flags.pop('rational', True):
if any(v.has(Float) for v in mat):
mat = mat._new(mat.rows, mat.cols,
[nsimplify(v, rational=True) for v in mat])
flags.pop('simplify', None) # pop unsupported flag
return mat.berkowitz_eigenvals(**flags)
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis).
The flag ``simplify`` has two effects:
1) if bool(simplify) is True, as_content_primitive()
will be used to tidy up normalization artifacts;
2) if nullspace needs simplification to compute the
basis, the simplify flag will be passed on to the
nullspace routine which will interpret it there.
If the matrix contains any Floats, they will be changed to Rationals
for computation purposes, but the answers will be returned after being
evaluated with evalf. If it is desired to remove small imaginary
portions during the evalf step, pass a value for the ``chop`` flag.
"""
from sympy.matrices import eye
simplify = flags.get('simplify', True)
primitive = bool(flags.get('simplify', False))
chop = flags.pop('chop', False)
flags.pop('multiple', None) # remove this if it's there
# roots doesn't like Floats, so replace them with Rationals
float = False
mat = self
if any(v.has(Float) for v in self):
float = True
mat = mat._new(mat.rows, mat.cols, [nsimplify(
v, rational=True) for v in mat])
flags['rational'] = False # to tell eigenvals not to do this
out, vlist = [], mat.eigenvals(**flags)
vlist = list(vlist.items())
vlist.sort(key=default_sort_key)
flags.pop('rational', None)
for r, k in vlist:
tmp = mat.as_mutable() - eye(mat.rows) * r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplify=simplify)
if not basis:
raise NotImplementedError(
"Can't evaluate eigenvector for eigenvalue %s" % r)
if primitive:
# the relationship A*e = lambda*e will still hold if we change the
# eigenvector; so if simplify is True we tidy up any normalization
# artifacts with as_content_primitive (default) and remove any pure Integer
# denominators.
l = 1
for i, b in enumerate(basis[0]):
c, p = signsimp(b).as_content_primitive()
if c is not S.One:
b = c * p
l = ilcm(l, c.q)
basis[0][i] = b
if l != 1:
basis[0] *= l
if float:
out.append((r.evalf(chop=chop), k, [
mat._new(b).evalf(chop=chop) for b in basis]))
else:
out.append((r, k, [mat._new(b) for b in basis]))
return out
def exp(self):
"""Return the exponentiation of a square matrix."""
if not self.is_square:
raise NonSquareMatrixError(
"Exponentiation is valid only for square matrices")
try:
P, cells = self.jordan_cells()
except MatrixError:
raise NotImplementedError(
"Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
def _jblock_exponential(b):
# This function computes the matrix exponential for one single Jordan block
nr = b.rows
l = b[0, 0]
if nr == 1:
res = exp(l)
else:
from sympy import eye
# extract the diagonal part
d = b[0, 0] * eye(nr)
# and the nilpotent part
n = b - d
# compute its exponential
nex = eye(nr)
for i in range(1, nr):
nex = nex + n ** i / factorial(i)
# combine the two parts
res = exp(b[0, 0]) * nex
return (res)
blocks = list(map(_jblock_exponential, cells))
from sympy.matrices import diag
eJ = diag(*blocks)
ret = P * eJ * P.inv()
return type(self)(ret)
def gauss_jordan_solve(self, b, freevar=False):
"""
Solves Ax = b using Gauss Jordan elimination.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, it will
be returned parametrically. If no solutions exist, a ValueError
is raised.
Parameters
==========
b : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
freevar : List
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of arbitrary
values of the free variables. Then the index of the free variables
in the solutions (column Matrix) will be returned by freevar, if
the flag `freevar` is set to `True`.
Returns
=======
x : Matrix
The matrix that will satisfy Ax = b. Will have as many rows as
matrix A has columns, and as many columns as matrix b.
params : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of arbitrary
parameters. These arbitrary parameters are returned as params
Matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
>>> b = Matrix([7, 12, 4])
>>> sol, params = A.gauss_jordan_solve(b)
>>> sol
Matrix([
[-2*_tau0 - 3*_tau1 + 2],
[ _tau0],
[ 2*_tau1 + 5],
[ _tau1]])
>>> params
Matrix([
[_tau0],
[_tau1]])
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> sol, params = A.gauss_jordan_solve(b)
>>> sol
Matrix([
[-1],
[ 2],
[ 0]])
>>> params
Matrix(0, 1, [])
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
References
==========
.. [1] http://en.wikipedia.org/wiki/Gaussian_elimination
"""
from sympy.matrices import Matrix, zeros
aug = self.hstack(self.copy(), b.copy())
row, col = aug[:, :-1].shape
# solve by reduced row echelon form
A, pivots = aug.rref(simplify=True)
A, v = A[:, :-1], A[:, -1]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
A = A.vstack(A, permutation)
for i, c in enumerate(pivots):
A.col_swap(i, c)
A, permutation = A[:-1, :], A[-1, :]
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, 0].is_zero:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
free_var_index = permutation[
len(pivots):] # non-pivots columns are free variables
# Free parameters
dummygen = numbered_symbols("tau", Dummy)
tau = Matrix([next(dummygen) for k in range(col - rank)]).reshape(
col - rank, 1)
# Full parametric solution
V = A[:rank, rank:]
vt = v[:rank, 0]
free_sol = tau.vstack(vt - V * tau, tau)
# Undo permutation
sol = zeros(col, 1)
for k, v in enumerate(free_sol):
sol[permutation[k], 0] = v
if freevar:
return sol, tau, free_var_index
else:
return sol, tau
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
kls = type(args[0])
return reduce(kls.row_join, args)
def integrate(self, *args):
"""Integrate each element of the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[ x, 0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2, 0]])
See Also
========
limit
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].integrate(*args))
def inv_mod(self, m):
"""
Returns the inverse of the matrix `K` (mod `m`), if it exists.
Method to find the matrix inverse of `K` (mod `m`) implemented in this function:
* Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.
* Compute `r = 1/\mathrm{det}(K) \pmod m`.
* `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.inv_mod(5)
Matrix([
[3, 1],
[4, 2]])
>>> A.inv_mod(3)
Matrix([
[1, 1],
[0, 1]])
"""
from sympy.ntheory import totient
if not self.is_square:
raise NonSquareMatrixError()
N = self.cols
phi = totient(m)
det_K = self.det()
if gcd(det_K, m) != 1:
raise ValueError('Matrix is not invertible (mod %d)' % m)
det_inv = pow(int(det_K), int(phi - 1), int(m))
K_adj = self.cofactorMatrix().transpose()
K_inv = self.__class__(N, N,
[det_inv * K_adj[i, j] % m for i in range(N) for
j in range(N)])
return K_inv
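# Illustrative sketch (added; not part of the original source): the
# result multiplies back to the identity modulo m.
#   >>> from sympy import Matrix
#   >>> A = Matrix(2, 2, [1, 2, 3, 4])
#   >>> (A * A.inv_mod(5)).applyfunc(lambda x: x % 5)
#   Matrix([
#   [1, 0],
#   [0, 1]])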
def inverse_ADJ(self, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_LU
inverse_GE
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = self.berkowitz_det()
zero = d.equals(0)
if zero is None:
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise ValueError("Matrix det == 0; not invertible.")
return self.adjugate() / d
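# Illustrative sketch (added; not part of the original source):
#   >>> from sympy import Matrix
#   >>> Matrix([[1, 2], [3, 4]]).inverse_ADJ()
#   Matrix([
#   [ -2,    1],
#   [3/2, -1/2]])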
def inverse_GE(self, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_LU
inverse_ADJ
"""
from .dense import Matrix
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self._new(red[:, big.rows:])
def inverse_LU(self, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_GE
inverse_ADJ
"""
if not self.is_square:
raise NonSquareMatrixError()
ok = self.rref(simplify=True)[0]
if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
raise ValueError("Matrix det == 0; not invertible.")
# pass the caller's iszerofunc through (it was hard-coded to _iszero)
return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inv(self, method=None, **kwargs):
"""
Return the inverse of a matrix.
CASE 1: If the matrix is a dense matrix.
Return the matrix inverse using the method indicated (default
is Gauss elimination).
kwargs
======
method : ('GE', 'LU', or 'ADJ')
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
GE .... inverse_GE(); default
LU .... inverse_LU()
ADJ ... inverse_ADJ()
See Also
========
inverse_LU
inverse_GE
inverse_ADJ
Raises
------
ValueError
If the determinant of the matrix is zero.
CASE 2: If the matrix is a sparse matrix.
Return the matrix inverse using Cholesky or LDL (default).
kwargs
======
method : ('CH', 'LDL')
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
LDL ... inverse_LDL(); default
CH .... inverse_CH()
Raises
------
ValueError
If the determinant of the matrix is zero.
"""
if not self.is_square:
raise NonSquareMatrixError()
if method is not None:
kwargs['method'] = method
return self._eval_inverse(**kwargs)
def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
"""Check if matrix is diagonalizable.
If reals_only==True, then also check that the diagonalized matrix
consists only of real (not complex) values.
Some subproducts could be reused by other methods to avoid double
calculations; by default (if clear_subproducts==True) they will be deleted.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> m.is_diagonalizable()
True
>>> m = Matrix(2, 2, [0, 1, 0, 0])
>>> m
Matrix([
[0, 1],
[0, 0]])
>>> m.is_diagonalizable()
False
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_diagonalizable()
True
>>> m.is_diagonalizable(True)
False
See Also
========
is_diagonal
diagonalize
"""
if not self.is_square:
return False
res = False
self._is_symbolic = self.is_symbolic()
self._is_symmetric = self.is_symmetric()
self._eigenvects = None
self._eigenvects = self.eigenvects(simplify=True)
all_iscorrect = True
for eigenval, multiplicity, vects in self._eigenvects:
if len(vects) != multiplicity:
all_iscorrect = False
break
elif reals_only and not eigenval.is_real:
all_iscorrect = False
break
res = all_iscorrect
if clear_subproducts:
self._diagonalize_clear_subproducts()
return res
def is_nilpotent(self):
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
if not self:
return True
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = Dummy('x')
if self.charpoly(x).args[0] == x ** self.rows:
return True
return False
def jacobian(self, X):
"""Calculates the Jacobian matrix (derivative of a vectorial function).
Parameters
==========
self : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(i.e., jacobian() should always work).
Examples
========
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)]])
See Also
========
hessian
wronskian
"""
if not isinstance(X, MatrixBase):
X = self._new(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def jordan_cell(self, eigenval, n):
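"""Return an ``n x n`` Jordan block with ``eigenval`` on the diagonal
and ones on the superdiagonal. (Docstring added for clarity; the
original source left this method undocumented.)
>>> from sympy import Matrix
>>> Matrix(1, 1, [0]).jordan_cell(2, 3)
Matrix([
[2, 1, 0],
[0, 2, 1],
[0, 0, 2]])
"""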
n = int(n)
from sympy.matrices import MutableMatrix
out = MutableMatrix.zeros(n)
for i in range(n - 1):
out[i, i] = eigenval
out[i, i + 1] = 1
out[n - 1, n - 1] = eigenval
return type(self)(out)
def jordan_cells(self, calc_transformation=True):
r"""Return a list of Jordan cells of current matrix.
This list shape Jordan matrix J.
If calc_transformation is specified as False, then transformation P such that
`J = P^{-1} \cdot M \cdot P`
will not be calculated.
Notes
=====
The ``calc_transformation`` flag is currently ignored; the
transformation P is always calculated and returned.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 4, [
... 6, 5, -2, -3,
... -3, -1, 3, 3,
... 2, 1, -2, -3,
... -1, 1, 5, 5])
>>> P, Jcells = m.jordan_cells()
>>> Jcells[0]
Matrix([
[2, 1],
[0, 2]])
>>> Jcells[1]
Matrix([
[2, 1],
[0, 2]])
See Also
========
jordan_form
"""
n = self.rows
Jcells = []
Pcols_new = []
jordan_block_structures = self._jordan_block_structure()
from sympy.matrices import MutableMatrix
# Order according to default_sort_key, this makes sure the order is the same as in .diagonalize():
for eigenval in (
sorted(list(jordan_block_structures.keys()), key=default_sort_key)):
l_jordan_chains = jordan_block_structures[eigenval]
for s in reversed(sorted(
(l_jordan_chains).keys())): # Start with the biggest block
s_chains = l_jordan_chains[s]
block = self.jordan_cell(eigenval, s)
number_of_s_chains = len(s_chains)
for i in range(0, number_of_s_chains):
Jcells.append(type(self)(block))
chain_vectors = s_chains[i]
lc = len(chain_vectors)
assert lc == s
for j in range(0, lc):
generalized_eigen_vector = chain_vectors[j]
Pcols_new.append(generalized_eigen_vector)
P = MutableMatrix.zeros(n)
for j in range(0, n):
P[:, j] = Pcols_new[j]
return type(self)(P), Jcells
def jordan_form(self, calc_transformation=True):
r"""Return Jordan form J of current matrix.
Also the transformation P such that
`J = P^{-1} \cdot M \cdot P`
and the jordan blocks forming J
will be calculated.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix([
... [ 6, 5, -2, -3],
... [-3, -1, 3, 3],
... [ 2, 1, -2, -3],
... [-1, 1, 5, 5]])
>>> P, J = m.jordan_form()
>>> J
Matrix([
[2, 1, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 1],
[0, 0, 0, 2]])
See Also
========
jordan_cells
"""
P, Jcells = self.jordan_cells()
from sympy.matrices import diag
J = diag(*Jcells)
return P, type(self)(J)
def key2bounds(self, keys):
"""Converts a key with potentially mixed types of keys (integer and slice)
into a tuple of ranges and raises an error if any index is out of self's
range.
See Also
========
key2ij
"""
islice, jslice = [isinstance(k, slice) for k in keys]
if islice:
if not self.rows:
rlo = rhi = 0
else:
rlo, rhi = keys[0].indices(self.rows)[:2]
else:
rlo = a2idx(keys[0], self.rows)
rhi = rlo + 1
if jslice:
if not self.cols:
clo = chi = 0
else:
clo, chi = keys[1].indices(self.cols)[:2]
else:
clo = a2idx(keys[1], self.cols)
chi = clo + 1
return rlo, rhi, clo, chi
def key2ij(self, key):
"""Converts key into canonical form, converting integers or indexable
items into valid integers for self's range or returning slices
unchanged.
See Also
========
key2bounds
"""
if is_sequence(key):
if not len(key) == 2:
raise TypeError('key must be a sequence of length 2')
return [a2idx(i, n) if not isinstance(i, slice) else i
for i, n in zip(key, self.shape)]
elif isinstance(key, slice):
return key.indices(len(self))[:2]
else:
return divmod(a2idx(key, len(self)), self.cols)
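# Illustrative sketch (added; not part of the original source): for a
# 2 x 3 matrix, a flat integer key is mapped to (row, col) via divmod by
# the column count, e.g. key 4 -> divmod(4, 3) == (1, 1).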
def LDLdecomposition(self):
"""Returns the LDL Decomposition (L, D) of matrix A,
such that L * D * L.T == A
This method eliminates the use of square roots.
Further, this ensures that all the diagonal entries of L are 1.
A must be a square, symmetric, positive-definite
and non-singular matrix.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T * A.inv() == eye(A.rows)
True
See Also
========
cholesky
LUdecomposition
QRdecomposition
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._LDLdecomposition()
def LDLsolve(self, rhs):
"""Solves Ax = B using LDL decomposition,
for a general square and non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LDLsolve(B) == B/2
True
See Also
========
LDLdecomposition
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LUsolve
QRsolve
pinv_solve
"""
if self.is_symmetric():
L, D = self.LDLdecomposition()
elif self.rows >= self.cols:
L, D = (self.T * self).LDLdecomposition()
rhs = self.T * rhs
else:
raise NotImplementedError('Under-determined System. '
'Try M.gauss_jordan_solve(rhs)')
Y = L._lower_triangular_solve(rhs)
Z = D._diagonal_solve(Y)
return (L.T)._upper_triangular_solve(Z)
def left_eigenvects(self, **flags):
"""Returns left eigenvectors and eigenvalues.
This function returns the list of triples (eigenval, multiplicity,
basis) for the left eigenvectors. Options are the same as for
eigenvects(), i.e. the ``**flags`` arguments get passed directly to
eigenvects().
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]])
>>> M.eigenvects()
[(-1, 1, [Matrix([
[-1],
[ 1],
[ 0]])]), (0, 1, [Matrix([
[ 0],
[-1],
[ 1]])]), (2, 1, [Matrix([
[2/3],
[1/3],
[ 1]])])]
>>> M.left_eigenvects()
[(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2,
1, [Matrix([[1, 1, 1]])])]
"""
mat = self
left_transpose = mat.transpose().eigenvects(**flags)
left = []
for (ev, mult, ltmp) in left_transpose:
left.append((ev, mult, [l.transpose() for l in ltmp]))
return left
def limit(self, *args):
"""Calculate the limit of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])
See Also
========
integrate
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].limit(*args))
def lower_triangular_solve(self, rhs):
"""Solves Ax = B, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise ShapeError("Matrices size mismatch.")
if not self.is_lower:
raise ValueError("Matrix must be lower triangular.")
return self._lower_triangular_solve(rhs)
def LUdecomposition(self, iszerofunc=_iszero):
"""Returns the decomposition LU and the row swaps p.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
cholesky
LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i, j] = combined[i, j]
else:
if i == j:
L[i, i] = 1
U[i, j] = combined[i, j]
return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
"""Returns A comprised of L, U (L's diag entries are 1) and
p which is the list of the row swaps (in order).
See Also
========
LUdecomposition
LUdecompositionFF
LUsolve
"""
if not self.is_square:
raise NonSquareMatrixError(
"A Matrix must be square to apply LUdecomposition_Simple().")
n = self.rows
A = self.as_mutable()
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i, j] = A[i, j] - A[i, k] * A[k, j]
pivot = -1
for i in range(j, n):
for k in range(j):
A[i, j] = A[i, j] - A[i, k] * A[k, j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and not iszerofunc(A[i, j]):
pivot = i
if pivot < 0:
# this result is based on iszerofunc's analysis of the possible pivots, so even though
# the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
raise ValueError("No nonzero pivot found; inversion failed.")
if pivot != j: # row must be swapped
A.row_swap(pivot, j)
p.append([pivot, j])
scale = 1 / A[j, j]
for i in range(j + 1, n):
A[i, j] = A[i, j] * scale
return A, p
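# Illustrative sketch (added; not part of the original source): L and U
# are packed into one matrix, with L's unit diagonal implied.
#   >>> from sympy import Matrix
#   >>> A, p = Matrix([[4, 3], [6, 3]]).LUdecomposition_Simple()
#   >>> A   # U on/above the diagonal, L strictly below
#   Matrix([
#   [  4,    3],
#   [3/2, -3/2]])
#   >>> p   # no row swaps were needed
#   []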
def LUdecompositionFF(self):
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
**Reference**
- W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
See Also
========
LUdecomposition
LUdecomposition_Simple
LUsolve
"""
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = self.rows, self.cols
U, L, P = self.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L[k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot * Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
def LUsolve(self, rhs, iszerofunc=_iszero):
"""Solve the linear system Ax = rhs for x where A = self.
This is for symbolic matrices, for real or complex ones use
mpmath.lu_solve or mpmath.qr_solve.
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
pinv_solve
LUdecomposition
"""
if rhs.rows != self.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm).as_mutable()
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y * scale)
# backward substitution
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y * scale)
scale = A[i, i]
b.row_op(i, lambda x, _: x / scale)
return rhs.__class__(b)
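# Illustrative sketch (added; not part of the original source):
#   >>> from sympy import Matrix
#   >>> A = Matrix([[2, 1], [1, 3]])
#   >>> A.LUsolve(Matrix([3, 4]))
#   Matrix([
#   [1],
#   [1]])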
def minorEntry(self, i, j, method="berkowitz"):
"""Calculate the minor of an element.
See Also
========
minorMatrix
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
return self.minorMatrix(i, j).det(method)
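# Illustrative sketch (added; not part of the original source): the
# (1, 1) minor deletes row 1 and column 1 before taking the determinant.
#   >>> from sympy import Matrix
#   >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
#   >>> M.minorEntry(1, 1)   # det(Matrix([[1, 3], [7, 9]])) == -12
#   -12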
def minorMatrix(self, i, j):
"""Creates the minor matrix of a given element.
See Also
========
minorEntry
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
M = self.as_mutable()
M.row_del(i)
M.col_del(j)
return self._new(M)
def multiply_elementwise(self, b):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
cross
dot
multiply
"""
from sympy.matrices import matrix_multiply_elementwise
return matrix_multiply_elementwise(self, b)
def multiply(self, b):
"""Returns self*b
See Also
========
dot
cross
multiply_elementwise
"""
return self * b
def normalized(self):
"""Return the normalized version of ``self``.
See Also
========
norm
"""
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def norm(self, ord=None):
"""Return the Norm of a Matrix or Vector.
In the simplest case this is the geometric size of the vector
Other norms can be specified by the ord parameter
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm - does not exist
inf -- max(abs(x))
-inf -- min(abs(x))
1 -- as below
-1 -- as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other - does not exist sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Examples
========
>>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
>>> x = Symbol('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> trigsimp( v.norm() )
1
>>> v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1, 1], [1, 1]])
>>> A.norm(2)  # Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> A.norm(-2) # Inverse spectral norm (smallest singular value)
0
>>> A.norm() # Frobenius Norm
2
>>> Matrix([1, -2]).norm(oo)
2
>>> Matrix([-1, 2]).norm(-oo)
1
See Also
========
normalized
"""
# Row or Column Vector Norms
vals = list(self.values()) or [0]
if self.rows == 1 or self.cols == 1:
if ord == 2 or ord is None: # Common case sqrt(<x, x>)
return sqrt(Add(*(abs(i) ** 2 for i in vals)))
elif ord == 1: # sum(abs(x))
return Add(*(abs(i) for i in vals))
elif ord == S.Infinity: # max(abs(x))
return Max(*[abs(i) for i in vals])
elif ord == S.NegativeInfinity: # min(abs(x))
return Min(*[abs(i) for i in vals])
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
try:
return Pow(Add(*(abs(i) ** ord for i in vals)), S(1) / ord)
except (NotImplementedError, TypeError):
raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
if ord == 2: # Spectral Norm
# Maximum singular value
return Max(*self.singular_values())
elif ord == -2:
# Minimum singular value
return Min(*self.singular_values())
elif (ord is None or isinstance(ord,
string_types) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def nullspace(self, simplify=False):
"""Returns list of vectors (Matrix objects) that span nullspace of self
Examples
========
>>> from sympy.matrices import Matrix
>>> m = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> m
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> m.nullspace()
[Matrix([
[-3],
[ 1],
[ 0]])]
See Also
========
columnspace
"""
from sympy.matrices import zeros
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros(self.cols, 1))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1] * len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i, 0] = 1
else:  # add negative of nonpivot entry to corresponding vector
for j in range(i + 1, self.cols):
line = pivots.index(i)
v = reduced[line, j]
if simplify:
v = simpfunc(v)
if v:
if j in pivots:
# XXX: Is this the correct error?
raise NotImplementedError(
"Could not compute the nullspace of `self`.")
basis[basiskey.index(j)][i, 0] = -v
return [self._new(b) for b in basis]
def permuteBkwd(self, perm):
"""Permute the rows of the matrix with the given permutation in reverse.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteBkwd([[0, 1], [0, 2]])
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
See Also
========
permuteFwd
"""
copy = self.copy()
for i in range(len(perm) - 1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
"""Permute the rows of the matrix with the given permutation.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteFwd([[0, 1], [0, 2]])
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
See Also
========
permuteBkwd
"""
copy = self.copy()
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def pinv_solve(self, B, arbitrary_matrix=None):
"""Solve Ax = B using the Moore-Penrose pseudoinverse.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, one will
be returned based on the value of arbitrary_matrix. If no solutions
exist, the least-squares solution is returned.
Parameters
==========
B : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
arbitrary_matrix : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of an arbitrary
matrix. This parameter may be set to a specific matrix to use
for that purpose; if so, it must be the same shape as x, with as
many rows as matrix A has columns, and as many columns as matrix
B. If left as None, an appropriate matrix containing dummy
symbols in the form of ``wn_m`` will be used, with n and m being
row and column position of each symbol.
Returns
=======
x : Matrix
The matrix that will satisfy Ax = B. Will have as many rows as
matrix A has columns, and as many columns as matrix B.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([7, 8])
>>> A.pinv_solve(B)
Matrix([
[ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
[-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
[ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
>>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
Matrix([
[-55/18],
[ 1/9],
[ 59/18]])
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
Notes
=====
This may return either exact solutions or least squares solutions.
To determine which, check ``A * A.pinv() * B == B``. It will be
True if exact solutions exist, and False if only a least-squares
solution exists. Be aware that the left hand side of that equation
may need to be simplified to correctly compare to the right hand
side.
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
"""
from sympy.matrices import eye
A = self
A_pinv = self.pinv()
if arbitrary_matrix is None:
rows, cols = A.cols, B.cols
w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
arbitrary_matrix = self.__class__(cols, rows, w).T
return A_pinv * B + (eye(A.cols) - A_pinv * A) * arbitrary_matrix
def pinv(self):
"""Calculate the Moore-Penrose pseudoinverse of the matrix.
The Moore-Penrose pseudoinverse exists and is unique for any matrix.
If the matrix is invertible, the pseudoinverse is the same as the
inverse.
Examples
========
>>> from sympy import Matrix
>>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
Matrix([
[-17/18, 4/9],
[ -1/9, 1/9],
[ 13/18, -2/9]])
See Also
========
inv
pinv_solve
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
"""
A = self
AH = self.H
# Trivial case: pseudoinverse of all-zero matrix is its transpose.
if A.is_zero:
return AH
try:
if self.rows >= self.cols:
return (AH * A).inv() * AH
else:
return AH * (A * AH).inv()
except ValueError:
# Matrix is not full rank, so A*AH cannot be inverted.
raise NotImplementedError('Rank-deficient matrices are not yet '
'supported.')
def print_nonzero(self, symb="X"):
"""Shows location of non-zero entries for fast shape lookup.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x ]
[ x ]
[ x ]
[ x]
"""
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print('\n'.join(s))
def project(self, v):
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========
>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
return v * (self.dot(v) / v.dot(v))
def QRdecomposition(self):
"""Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.
Examples
========
This is the example from wikipedia:
>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
>>> R
Matrix([
[14, 21, -14],
[ 0, 175, -70],
[ 0, 0, 35]])
>>> A == Q*R
True
QR factorization of an identity matrix:
>>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
cholesky
LDLdecomposition
LUdecomposition
QRsolve
"""
cls = self.__class__
mat = self.as_mutable()
if not mat.rows >= mat.cols:
raise MatrixError(
    "The number of rows must be greater than or equal to the number of columns")
n = mat.rows
m = mat.cols
rank = n
row_reduced = mat.rref()[0]
for i in range(row_reduced.rows):
if row_reduced.row(i).norm() == 0:
rank -= 1
if not rank == mat.cols:
raise MatrixError("The rank of the matrix must match the columns")
Q, R = mat.zeros(n, m), mat.zeros(m)
for j in range(m): # for each column vector
tmp = mat[:, j] # take original v
for i in range(j):
# subtract the projection of the column onto each previous Q column
tmp -= Q[:, i] * mat[:, j].dot(Q[:, i])
tmp.expand()
# normalize it
R[j, j] = tmp.norm()
Q[:, j] = tmp / R[j, j]
if Q[:, j].norm() != 1:
raise NotImplementedError(
"Could not normalize the vector %d." % j)
for i in range(j):
R[i, j] = Q[:, i].dot(mat[:, j])
return cls(Q), cls(R)
def QRsolve(self, b):
"""Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
However, LUsolve usually uses exact arithmetic, so you don't need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use mpmath.qr_solve.
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
pinv_solve
QRdecomposition
"""
Q, R = self.as_mutable().QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n - 1, -1, -1):
tmp = y[j, :]
for k in range(j + 1, n):
tmp -= R[j, k] * x[n - 1 - k]
x.append(tmp / R[j, j])
return self._new([row._mat for row in reversed(x)])
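# Illustrative sketch (added; not part of the original source), reusing
# the matrix from the QRdecomposition() docstring; the right-hand side
# is A * [1, 1, 1]:
#   >>> from sympy import Matrix
#   >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
#   >>> A.QRsolve(Matrix([-35, 105, -21]))
#   Matrix([
#   [1],
#   [1],
#   [1]])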
def rank(self, iszerofunc=_iszero, simplify=False):
"""
Returns the rank of a matrix
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rank()
2
>>> n = Matrix(3, 3, range(1, 10))
>>> n.rank()
2
"""
row_reduced = self.rref(iszerofunc=iszerofunc, simplify=simplify)
rank = len(row_reduced[-1])
return rank
def refine(self, assumptions=True):
"""Apply refine to each element of the matrix.
Examples
========
>>> from sympy import Symbol, Matrix, Abs, sqrt, Q
>>> x = Symbol('x')
>>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]])
Matrix([
[ Abs(x)**2, sqrt(x**2)],
[sqrt(x**2), Abs(x)**2]])
>>> _.refine(Q.real(x))
Matrix([
[ x**2, Abs(x)],
[Abs(x), x**2]])
"""
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
M = self[:, :]
return M.applyfunc(lambda x: x.replace(F, G, map))
def rref(self, iszerofunc=_iszero, simplify=False):
"""Return reduced row-echelon form of matrix and indices of pivot vars.
To simplify elements before finding nonzero pivots set simplify=True
(to use the default SymPy simplify function) or pass a custom
simplify function.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rref()
(Matrix([
[1, 0],
[0, 1]]), [0, 1])
>>> rref_matrix, rref_pivots = m.rref()
>>> rref_matrix
Matrix([
[1, 0],
[0, 1]])
>>> rref_pivots
[0, 1]
"""
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
# pivot: index of next row to contain a pivot
pivot, r = 0, self.as_mutable()
# pivotlist: indices of pivot variables (non-free)
pivotlist = []
for i in range(r.cols):
if pivot == r.rows:
break
if simplify:
r[pivot, i] = simpfunc(r[pivot, i])
pivot_offset, pivot_val, assumed_nonzero, newly_determined = _find_reasonable_pivot(
r[pivot:, i], iszerofunc, simpfunc)
# `_find_reasonable_pivot` may have simplified
# some elements along the way. If they were simplified
# and then determined to be either zero or non-zero for
# sure, they are stored in the `newly_determined` list
for (offset, val) in newly_determined:
r[pivot + offset, i] = val
# if `pivot_offset` is None, this column has no
# pivot
if pivot_offset is None:
continue
# swap the pivot column into place
pivot_pos = pivot + pivot_offset
r.row_swap(pivot, pivot_pos)
r.row_op(pivot, lambda x, _: x / pivot_val)
for j in range(r.rows):
if j == pivot:
continue
pivot_val = r[j, i]
r.zip_row_op(j, pivot, lambda x, y: x - pivot_val * y)
pivotlist.append(i)
pivot += 1
return self._new(r), pivotlist
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def simplify(self, ratio=1.7, measure=count_ops):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(ratio, measure))
def singular_values(self):
"""Compute the singular values of a Matrix
Examples
========
>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> A.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========
condition_number
"""
mat = self.as_mutable()
# Compute eigenvalues of A.H A
valmultpairs = (mat.H * mat).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
            vals += [sqrt(k)] * v  # the same sqrt(k) object is repeated v times (once per unit of multiplicity)
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
def solve_least_squares(self, rhs, method='CH'):
"""Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represent coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
if method == 'CH':
return self.cholesky_solve(rhs)
t = self.T
return (t * self).inv(method=method) * t * rhs
def solve(self, rhs, method='GE'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
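        Examples
        ========
        A small sketch using the default method (inversion of self),
        with illustrative values:
        >>> from sympy import Matrix
        >>> A = Matrix([[1, 2], [3, 4]])
        >>> A.solve(Matrix([5, 6]))
        Matrix([
        [ -4],
        [9/2]])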
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system. '
'Try M.gauss_jordan_solve(rhs)')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method) * rhs
def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
colsep=', ', align='right'):
r"""
String form of Matrix as a table.
``printer`` is the printer to use for on the elements (generally
something like StrPrinter())
``rowstart`` is the string used to start each row (by default '[').
``rowend`` is the string used to end each row (by default ']').
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left',
'right', or 'center'. You can also use '<', '>', and '^' to mean the
same thing, respectively.
This is used by the string printer for Matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[ 1, 2]\n[-33, 4]'
>>> print(M.table(printer))
[ 1, 2]
[-33, 4]
>>> print(M.table(printer, rowsep=',\n'))
[ 1, 2],
[-33, 4]
>>> print('[%s]' % M.table(printer, rowsep=',\n'))
[[ 1, 2],
[-33, 4]]
>>> print(M.table(printer, colsep=' '))
[ 1 2]
[-33 4]
>>> print(M.table(printer, align='center'))
[ 1 , 2]
[-33, 4]
>>> print(M.table(printer, rowstart='{', rowend='}'))
{ 1, 2}
{-33, 4}
"""
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i, j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': 'ljust',
'right': 'rjust',
'center': 'center',
'<': 'ljust',
'>': 'rjust',
'^': 'center',
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = getattr(elem, align)(maxlen[j])
res[i] = rowstart + colsep.join(row) + rowend
return rowsep.join(res)
__matmul__ = __mul__
__rmatmul__ = __rmul__
def upper_triangular_solve(self, rhs):
"""Solves Ax = B, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise TypeError("Matrix size mismatch.")
if not self.is_upper:
raise TypeError("Matrix is not upper triangular.")
return self._upper_triangular_solve(rhs)
def vech(self, diagonal=True, check_symmetry=True):
"""Return the unique elements of a symmetric Matrix as a one column matrix
by stacking the elements in the lower triangle.
Arguments:
diagonal -- include the diagonal cells of self or not
        check_symmetry -- check that self is symmetric (the check is not completely reliable)
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
See Also
========
vec
"""
from sympy.matrices import zeros
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError(
"Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros(c * (c + 1) // 2, 1)
for j in range(c):
for i in range(j, c):
v[count] = self[i, j]
count += 1
else:
v = zeros(c * (c - 1) // 2, 1)
for j in range(c):
for i in range(j + 1, c):
v[count] = self[i, j]
count += 1
return v
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
kls = type(args[0])
return reduce(kls.col_join, args)
_eval_simplify = simplify
charpoly = berkowitz_charpoly
def classof(A, B):
"""
Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========
>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.matrices import classof
>>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableMatrix'>
"""
try:
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
except Exception:
pass
try:
import numpy
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
except Exception:
pass
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
def a2idx(j, n=None):
"""Return integer after making positive and validating against n."""
if type(j) is not int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (j,))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j,))
return int(j)
def _find_reasonable_pivot(col, iszerofunc=_iszero, simpfunc=_simplify):
""" Find the lowest index of an item in `col` that is
suitable for a pivot. If `col` consists only of
Floats, the pivot with the largest norm is returned.
    Otherwise, the first element for which `iszerofunc` returns
    False is used. If no such element is found (i.e. `iszerofunc`
    cannot decide), items are simplified and retested until a
    suitable pivot is found.
Returns a 4-tuple
(pivot_offset, pivot_val, assumed_nonzero, newly_determined)
where pivot_offset is the index of the pivot, pivot_val is
the (possibly simplified) value of the pivot, assumed_nonzero
is True if an assumption that the pivot was non-zero
was made without being probed, and newly_determined are
elements that were simplified during the process of pivot
finding."""
newly_determined = []
col = list(col)
# a column that contains a mix of floats and integers
# but at least one float is considered a numerical
# column, and so we do partial pivoting
if all(isinstance(x, (Float, Integer)) for x in col) and any(
isinstance(x, Float) for x in col):
col_abs = [abs(x) for x in col]
max_value = max(col_abs)
if iszerofunc(max_value):
# just because iszerofunc returned True, doesn't
# mean the value is numerically zero. Make sure
# to replace all entries with numerical zeros
if max_value != 0:
newly_determined = [(i, 0) for i, x in enumerate(col) if x != 0]
return (None, None, False, newly_determined)
index = col_abs.index(max_value)
return (index, col[index], False, newly_determined)
# PASS 1 (iszerofunc directly)
possible_zeros = []
for i, x in enumerate(col):
is_zero = iszerofunc(x)
        # if someone wrote a custom iszerofunc, it may return
# BooleanFalse or BooleanTrue instead of True or False,
# so use == for comparison instead of `is`
if is_zero == False:
# we found something that is definitely not zero
return (i, x, False, newly_determined)
possible_zeros.append(is_zero)
# by this point, we've found no certain non-zeros
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 2 (iszerofunc after simplify)
# we haven't found any for-sure non-zeros, so
# go through the elements iszerofunc couldn't
# make a determination about and opportunistically
# simplify to see if we find something
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
simped = simpfunc(x)
is_zero = iszerofunc(simped)
if is_zero == True or is_zero == False:
newly_determined.append((i, simped))
if is_zero == False:
return (i, simped, False, newly_determined)
possible_zeros[i] = is_zero
    # after simplifying, entries whose status was previously
    # undecided may now be known to be zero
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 3 (.equals(0))
# some expressions fail to simplify to zero, but
# `.equals(0)` evaluates to True. As a last-ditch
# attempt, apply `.equals` to these expressions
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
if x.equals(S.Zero):
            # `x.is_zero` may be None (undecided) even when
            # `.equals(0)` is able to prove that `x` vanishes,
            # so record the entry as a definite zero only when
            # `.equals(0)` returns True
possible_zeros[i] = True
newly_determined.append((i, S.Zero))
if all(possible_zeros):
return (None, None, False, newly_determined)
# at this point there is nothing that could definitely
# be a pivot. To maintain compatibility with existing
# behavior, we'll assume that an illdetermined thing is
# non-zero. We should probably raise a warning in this case
i = possible_zeros.index(None)
return (i, col[i], True, newly_determined)
class _MinimalMatrix(object):
"""Class providing the minimum functionality
for a matrix-like object and implementing every method
required for a `MatrixRequired`. This class does not have everything
needed to become a full-fledged sympy object, but it will satisfy the
requirements of anything inheriting from `MatrixRequired`. If you wish
to make a specialized matrix type, make sure to implement these
methods and properties with the exception of `__init__` and `__repr__`
which are included for convenience."""
is_MatrixLike = True
_sympify = staticmethod(sympify)
_class_priority = 3
is_Matrix = True
is_MatrixExpr = False
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __init__(self, rows, cols=None, mat=None):
if isinstance(mat, FunctionType):
# if we passed in a function, use that to populate the indices
mat = list(mat(i, j) for i in range(rows) for j in range(cols))
try:
if cols is None and mat is None:
mat = rows
rows, cols = mat.shape
except AttributeError:
pass
try:
# if we passed in a list of lists, flatten it and set the size
if cols is None and mat is None:
mat = rows
cols = len(mat[0])
rows = len(mat)
mat = [x for l in mat for x in l]
except (IndexError, TypeError):
pass
self.mat = tuple(self._sympify(x) for x in mat)
self.rows, self.cols = rows, cols
if self.rows is None or self.cols is None:
raise NotImplementedError("Cannot initialize matrix with given parameters")
def __getitem__(self, key):
def _normalize_slices(row_slice, col_slice):
"""Ensure that row_slice and col_slice don't have
`None` in their arguments. Any integers are converted
to slices of length 1"""
if not isinstance(row_slice, slice):
row_slice = slice(row_slice, row_slice + 1, None)
row_slice = slice(*row_slice.indices(self.rows))
if not isinstance(col_slice, slice):
col_slice = slice(col_slice, col_slice + 1, None)
col_slice = slice(*col_slice.indices(self.cols))
return (row_slice, col_slice)
def _coord_to_index(i, j):
"""Return the index in _mat corresponding
to the (i,j) position in the matrix. """
return i * self.cols + j
if isinstance(key, tuple):
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
# if the coordinates are not slices, make them so
# and expand the slices so they don't contain `None`
i, j = _normalize_slices(i, j)
rowsList, colsList = list(range(self.rows))[i], \
list(range(self.cols))[j]
indices = (i * self.cols + j for i in rowsList for j in
colsList)
return self._new(len(rowsList), len(colsList),
list(self.mat[i] for i in indices))
# if the key is a tuple of ints, change
# it to an array index
key = _coord_to_index(i, j)
return self.mat[key]
def __eq__(self, other):
return self.shape == other.shape and list(self) == list(other)
def __len__(self):
return self.rows*self.cols
def __repr__(self):
return "_MinimalMatrix({}, {}, {})".format(self.rows, self.cols,
self.mat)
@property
def shape(self):
return (self.rows, self.cols)
| 30.237531 | 115 | 0.497566 | from __future__ import print_function, division
import collections
from sympy.core.add import Add
from sympy.core.basic import Basic, Atom
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.logic import fuzzy_and
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy.core.numbers import Integer, ilcm, Float
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key, range, \
NotIterable
from sympy.polys import PurePoly, roots, cancel, gcd
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten, numbered_symbols
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions import exp, factorial
from sympy.printing import sstr
from sympy.core.compatibility import reduce, as_int, string_types
from sympy.assumptions.refine import refine
from sympy.core.decorators import call_highest_priority
from types import FunctionType
def _iszero(x):
return x.is_zero
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
pass
class NonSquareMatrixError(ShapeError):
pass
class DeferredVector(Symbol, NotIterable):
def __getitem__(self, i):
if i == -0:
i = 0
if i < 0:
raise IndexError('DeferredVector index out of range')
component_name = '%s[%d]' % (self.name, i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return "DeferredVector('%s')" % (self.name)
class MatrixRequired(object):
rows = None
cols = None
shape = None
_simplify = None
@classmethod
def _new(cls, *args, **kwargs):
raise NotImplementedError("Subclasses must implement this.")
def __eq__(self, other):
raise NotImplementedError("Subclasses must impliment this.")
def __getitem__(self, key):
raise NotImplementedError("Subclasses must implement this.")
def __len__(self):
raise NotImplementedError("Subclasses must implement this.")
class MatrixShaping(MatrixRequired):
def _eval_col_insert(self, pos, other):
cols = self.cols
def entry(i, j):
if j < pos:
return self[i, j]
elif pos <= j < pos + other.cols:
return other[i, j - pos]
return self[i, j - pos - other.cols]
return self._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_col_join(self, other):
rows = self.rows
def entry(i, j):
if i < rows:
return self[i, j]
return other[i - rows, j]
return classof(self, other)._new(self.rows + other.rows, self.cols,
lambda i, j: entry(i, j))
def _eval_extract(self, rowsList, colsList):
mat = list(self)
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
list(mat[i] for i in indices))
def _eval_get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def _eval_row_insert(self, pos, other):
entries = list(self)
insert_pos = pos * self.cols
entries[insert_pos:insert_pos] = list(other)
return self._new(self.rows + other.rows, self.cols, entries)
def _eval_row_join(self, other):
cols = self.cols
def entry(i, j):
if j < cols:
return self[i, j]
return other[i, j - cols]
return classof(self, other)._new(self.rows, self.cols + other.cols,
lambda i, j: entry(i, j))
def _eval_tolist(self):
return [list(self[i,:]) for i in range(self.rows)]
def _eval_vec(self):
rows = self.rows
def entry(n, _):
j = n // rows
i = n - j * rows
return self[i, j]
return self._new(len(self), 1, entry)
def col_insert(self, pos, other):
if not self:
return type(self)(other)
if pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != other.rows:
raise ShapeError(
"self and other must have the same number of rows.")
return self._eval_col_insert(pos, other)
def col_join(self, other):
from sympy.matrices import MutableMatrix
if not self:
return type(self)(other)
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_col_join(other)
def col(self, j):
return self[:, j]
def extract(self, rowsList, colsList):
if not is_sequence(rowsList) or not is_sequence(colsList):
raise TypeError("rowsList and colsList must be iterable")
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._eval_extract(rowsList, colsList)
def get_diag_blocks(self):
return self._eval_get_diag_blocks()
def reshape(self, rows, cols):
if self.rows * self.cols != rows * cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self[i * cols + j])
def row_insert(self, pos, other):
from sympy.matrices import MutableMatrix
if not self:
return self._new(other)
if pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != other.cols:
raise ShapeError(
"`self` and `other` must have the same number of columns.")
return self._eval_row_insert(pos, other)
def row_join(self, other):
if not self:
return self._new(other)
if self.rows != other.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
return self._eval_row_join(other)
def row(self, i):
return self[i, :]
@property
def shape(self):
return (self.rows, self.cols)
def tolist(self):
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return self._eval_tolist()
def vec(self):
return self._eval_vec()
class MatrixProperties(MatrixRequired):
def _eval_atoms(self, *types):
result = set()
for i in self:
result.update(i.atoms(*types))
return result
def _eval_free_symbols(self):
return set().union(*(i.free_symbols for i in self))
def _eval_has(self, *patterns):
return any(a.has(*patterns) for a in self)
def _eval_is_anti_symmetric(self, simpfunc):
if not all(simpfunc(self[i, j] + self[j, i]).is_zero for i in range(self.rows) for j in range(self.cols)):
return False
return True
def _eval_is_diagonal(self):
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
def _eval_is_hermetian(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i].conjugate()))
return mat.is_zero
def _eval_is_Identity(self):
def dirac(i, j):
if i == j:
return 1
return 0
return all(self[i, j] == dirac(i, j) for i in range(self.rows) for j in
range(self.cols))
def _eval_is_lower_hessenberg(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def _eval_is_lower(self):
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
def _eval_is_symbolic(self):
return self.has(Symbol)
def _eval_is_symmetric(self, simpfunc):
mat = self._new(self.rows, self.cols, lambda i, j: simpfunc(self[i, j] - self[j, i]))
return mat.is_zero
def _eval_is_zero(self):
if any(i.is_zero == False for i in self):
return False
if any(i.is_zero == None for i in self):
return None
return True
def _eval_is_upper_hessenberg(self):
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(i - 1))
def _eval_values(self):
return [i for i in self if not i.is_zero]
def atoms(self, *types):
types = tuple(t if isinstance(t, type) else type(t) for t in types)
if not types:
types = (Atom,)
return self._eval_atoms(*types)
@property
def free_symbols(self):
return self._eval_free_symbols()
def has(self, *patterns):
return self._eval_has(*patterns)
def is_anti_symmetric(self, simplify=True):
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_anti_symmetric(simpfunc)
def is_diagonal(self):
return self._eval_is_diagonal()
    @property
    def is_hermitian(self):
        # a property cannot be called with arguments, so the
        # simplification function is fixed to the default simplifier
        if not self.is_square:
            return False
        return self._eval_is_hermetian(_simplify)
@property
def is_Identity(self):
if not self.is_square:
return False
return self._eval_is_Identity()
@property
def is_lower_hessenberg(self):
return self._eval_is_lower_hessenberg()
@property
def is_lower(self):
return self._eval_is_lower()
@property
def is_square(self):
return self.rows == self.cols
def is_symbolic(self):
return self._eval_is_symbolic()
def is_symmetric(self, simplify=True):
simpfunc = simplify
if not isinstance(simplify, FunctionType):
simpfunc = _simplify if simplify else lambda x: x
if not self.is_square:
return False
return self._eval_is_symmetric(simpfunc)
@property
def is_upper_hessenberg(self):
return self._eval_is_upper_hessenberg()
@property
def is_upper(self):
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(i))
@property
def is_zero(self):
return self._eval_is_zero()
def values(self):
return self._eval_values()
class MatrixOperations(MatrixRequired):
def _eval_adjoint(self):
return self.transpose().conjugate()
def _eval_conjugate(self):
return self.applyfunc(lambda x: x.conjugate())
def _eval_trace(self):
return sum(self[i, i] for i in range(self.rows))
def _eval_transpose(self):
return self._new(self.cols, self.rows, lambda i, j: self[j, i])
def adjoint(self):
return self._eval_adjoint()
def applyfunc(self, f):
if not callable(f):
raise TypeError("`f` must be callable.")
out = self._new(self.rows, self.cols, [f(x) for x in self])
return out
def conjugate(self):
return self._eval_conjugate()
def doit(self, **kwargs):
return self.applyfunc(lambda x: x.doit())
def evalf(self, prec=None, **options):
return self.applyfunc(lambda i: i.evalf(prec, **options))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
@property
def H(self):
return self.T.C
def refine(self, assumptions=True):
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False):
return self.applyfunc(lambda x: x.replace(F, G, map))
def simplify(self, ratio=1.7, measure=count_ops):
return self.applyfunc(lambda x: x.simplify(ratio, measure))
def subs(self, *args, **kwargs):
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def trace(self):
if not self.rows == self.cols:
raise NonSquareMatrixError()
return self._eval_trace()
def transpose(self):
return self._eval_transpose()
T = property(transpose, None, None, "Matrix transposition.")
C = property(conjugate, None, None, "By-element conjugation.")
n = evalf
def xreplace(self, rule):
return self.applyfunc(lambda x: x.xreplace(rule))
_eval_simplify = simplify
class MatrixBase(MatrixOperations, MatrixProperties, MatrixShaping):
__array_priority__ = 11
is_Matrix = True
is_Identity = None
_class_priority = 3
_sympify = staticmethod(sympify)
__hash__ = None
def __add__(self, other):
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.shape != B.shape:
raise ShapeError("Matrix size mismatch: %s + %s" % (
A.shape, B.shape))
alst = A.tolist()
blst = B.tolist()
ret = [S.Zero] * A.rows
for i in range(A.shape[0]):
ret[i] = [j + k for j, k in zip(alst[i], blst[i])]
rv = classof(A, B)._new(ret)
if 0 in A.shape:
rv = rv.reshape(*A.shape)
return rv
raise TypeError('cannot add matrix and %s' % type(other))
def __array__(self):
from .dense import matrix2numpy
return matrix2numpy(self)
def __div__(self, other):
return self * (S.One / other)
def __getattr__(self, attr):
if attr in ('diff', 'integrate', 'limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc(item_doit)
return doit
else:
raise AttributeError(
"%s has no attribute %s." % (self.__class__.__name__, attr))
def __len__(self):
return self.rows * self.cols
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i, j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def __mul__(self, other):
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.cols != B.rows:
raise ShapeError("Matrix size mismatch: %s * %s." % (
A.shape, B.shape))
if A.cols == 0:
return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
try:
blst = B.T.tolist()
except AttributeError:
return NotImplemented
alst = A.tolist()
return classof(A, B)._new(A.rows, B.cols, lambda i, j:
reduce(lambda k, l: k + l,
[a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
else:
return self._new(self.rows, self.cols,
[i * other for i in self._mat])
def __neg__(self):
return -1 * self
def _matrix_pow_by_jordan_blocks(self, num):
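        # sketch of the approach: with self = P*J*P**-1 in Jordan form,
        # self**num = P*J**num*P**-1, and each Jordan block is raised to
        # the power entrywise via an explicit binomial formula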
from sympy.matrices import diag, MutableMatrix
from sympy import binomial
def jordan_cell_power(jc, n):
N = jc.shape[0]
l = jc[0, 0]
if l == 0 and (n < N - 1) != False:
raise ValueError("Matrix det == 0; not invertible")
elif l == 0 and N > 1 and n % 1 != 0:
raise ValueError("Non-integer power cannot be evaluated")
for i in range(N):
for j in range(N-i):
bn = binomial(n, i)
if isinstance(bn, binomial):
bn = bn._eval_expand_func()
jc[j, i+j] = l**(n-i)*bn
P, jordan_cells = self.jordan_cells()
jordan_cells = [MutableMatrix(j) for j in jordan_cells]
for j in jordan_cells:
jordan_cell_power(j, num)
return self._new(P*diag(*jordan_cells)*P.inv())
def _matrix_pow_by_recursion(self, num):
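        # binary exponentiation (repeated squaring); negative integer
        # powers go through the inverse first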
from sympy.matrices import eye
n = int(num)
if n < 0:
return self.inv()**-n
a = eye(self.cols)
s = self
while n:
if n % 2:
a *= s
n -= 1
if not n:
break
s *= s
n //= 2
return self._new(a)
def __pow__(self, num):
if not self.is_square:
raise NonSquareMatrixError()
num = sympify(num)
if num.is_Number and num % 1 == 0:
if (self.rows == 1):
return self._new([[self[0]**num]])
elif self.rows == 2 and num > 100000:
try:
return self._matrix_pow_by_jordan_blocks(num)
except ValueError:
pass
return self._matrix_pow_by_recursion(num)
elif isinstance(num, Expr):
return self._matrix_pow_by_jordan_blocks(num)
else:
raise TypeError(
"Only SymPy expressions or integers are supported as exponent for matrices")
def __radd__(self, other):
return self + other
def __repr__(self):
return sstr(self)
def __rmul__(self, a):
if getattr(a, 'is_Matrix', False):
return self._new(a) * self
return self._new(self.rows, self.cols, [a * i for i in self._mat])
def __rsub__(self, a):
return (-self) + a
def __str__(self):
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix(%s)" % str(self.tolist())
def __sub__(self, a):
return self + (-a)
def __truediv__(self, other):
return self.__div__(other)
def _diagonalize_clear_subproducts(self):
del self._is_symbolic
del self._is_symmetric
del self._eigenvects
def _format_str(self, printer=None):
if not printer:
from sympy.printing.str import StrPrinter
printer = StrPrinter()
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
if self.rows == 1:
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
from sympy.matrices.sparse import SparseMatrix
flat_list = None
if len(args) == 1:
if isinstance(args[0], SparseMatrix):
return args[0].rows, args[0].cols, flatten(args[0].tolist())
elif isinstance(args[0], MatrixBase):
return args[0].rows, args[0].cols, args[0]._mat
elif isinstance(args[0], Basic) and args[0].is_Matrix:
return args[0].rows, args[0].cols, args[0].as_explicit()._mat
elif hasattr(args[0], "__array__"):
                # the object implements __array__ (e.g. a NumPy array);
                # call it to get a numpy.array() and then make a python
                # list out of it
arr = args[0].__array__()
if len(arr.shape) == 2:
rows, cols = arr.shape[0], arr.shape[1]
flat_list = [cls._sympify(i) for i in arr.ravel()]
return rows, cols, flat_list
elif len(arr.shape) == 1:
rows, cols = arr.shape[0], 1
flat_list = [S.Zero] * rows
for i in range(len(arr)):
flat_list[i] = cls._sympify(arr[i])
return rows, cols, flat_list
else:
raise NotImplementedError(
"SymPy supports just 1D and 2D matrices")
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
elif is_sequence(args[0]) \
and not isinstance(args[0], DeferredVector):
in_mat = []
ncol = set()
for row in args[0]:
if isinstance(row, MatrixBase):
in_mat.extend(row.tolist())
if row.cols or row.rows: # only pay attention if it's not 0x0
ncol.add(row.cols)
else:
in_mat.append(row)
try:
ncol.add(len(row))
except TypeError:
ncol.add(1)
if len(ncol) > 1:
raise ValueError("Got rows of variable lengths: %s" %
sorted(list(ncol)))
cols = ncol.pop() if ncol else 0
rows = len(in_mat) if cols else 0
if rows:
if not is_sequence(in_mat[0]):
cols = 1
flat_list = [cls._sympify(i) for i in in_mat]
return rows, cols, flat_list
flat_list = []
for j in range(rows):
for i in range(cols):
flat_list.append(cls._sympify(in_mat[j][i]))
elif len(args) == 3:
rows = as_int(args[0])
cols = as_int(args[1])
if len(args) == 3 and isinstance(args[2], collections.Callable):
op = args[2]
flat_list = []
for i in range(rows):
flat_list.extend(
[cls._sympify(op(cls._sympify(i), cls._sympify(j)))
for j in range(cols)])
elif len(args) == 3 and is_sequence(args[2]):
flat_list = args[2]
if len(flat_list) != rows * cols:
raise ValueError(
'List length should be equal to rows*columns')
flat_list = [cls._sympify(i) for i in flat_list]
elif len(args) == 0:
rows = cols = 0
flat_list = []
if flat_list is None:
raise TypeError("Data type not understood")
return rows, cols, flat_list
def _jordan_block_structure(self):
jordan_block_structures = {}
_eigenvects = self.eigenvects()
ev = self.eigenvals()
if len(ev) == 0:
raise AttributeError("could not compute the eigenvalues")
for eigenval, multiplicity, vects in _eigenvects:
l_jordan_chains = {}
geometrical = len(vects)
if geometrical == multiplicity:
chains = []
for v in vects:
chain = [v]
chains.append(chain)
l_jordan_chains[1] = chains
jordan_block_structures[eigenval] = l_jordan_chains
elif geometrical == 0:
raise MatrixError(
"Matrix has the eigen vector with geometrical multiplicity equal zero.")
else:
from sympy.matrices import MutableMatrix
I = MutableMatrix.eye(self.rows)
l = eigenval
M = (self - l * I)
l_jordan_chains = {}
Ms = [I]
Ns = [[]]
a = [0]
smax = 0
M_new = Ms[-1] * M
Ns_new = M_new.nullspace()
a_new = len(Ns_new)
Ms.append(M_new)
Ns.append(Ns_new)
                while a_new > a[-1]:
a.append(a_new)
M_new = Ms[-1] * M
Ns_new = M_new.nullspace()
a_new = len(Ns_new)
Ms.append(M_new)
Ns.append(Ns_new)
smax += 1
# `E=Kernel(self-lI)^s / Kernel(self-lI)^(s-1)`
# Note that `Kernel(self-lI)^s = Kernel(Z) = V` (the whole vector space).
# So in the first step `s=smax` this restriction turns out to actually restrict nothing at all
# and the only remaining condition is to choose vectors in `Kernel(self-lI)^(s-1)`.
# Subsequently we compute `e_1=(self-lI)e_0`, `e_2=(self-lI)*e_1` and so on.
# The subspace `E` can have a dimension larger than one.
# That means that we have more than one Jordan block of size `s` for the eigenvalue `l`
# and as many Jordan chains (this is the case in the second example).
# In this case we start as many Jordan chains and have as many blocks of size `s` in the jcf.
# We now have all the Jordan blocks of size `s` but there might be others attached to the same
# eigenvalue that are smaller.
# So we will do the same procedure also for `s-1` and so on until 1 (the lowest possible order
# where the Jordan chain is of length 1 and just represented by the eigenvector).
for s in reversed(range(1, smax + 1)):
S = Ms[s]
# We want the vectors in `Kernel((self-lI)^s)`,
# but without those in `Kernel(self-lI)^s-1`
# so we will add their adjoints as additional equations
# to the system formed by `S` to get the orthogonal
# complement.
# (`S` will no longer be quadratic.)
exclude_vectors = Ns[s - 1]
for k in range(0, a[s - 1]):
S = S.col_join((exclude_vectors[k]).adjoint())
# We also want to exclude the vectors
# in the chains for the bigger blocks
# that we have already computed (if there are any).
# (That is why we start with the biggest s).
# Since Jordan blocks are not orthogonal in general
# (in the original space), only those chain vectors
# that are on level s (index `s-1` in a chain)
# are added.
for chain_list in l_jordan_chains.values():
for chain in chain_list:
S = S.col_join(chain[s - 1].adjoint())
e0s = S.nullspace()
# Determine the number of chain leaders
# for blocks of size `s`.
n_e0 = len(e0s)
s_chains = []
# s_cells=[]
for i in range(0, n_e0):
chain = [e0s[i]]
for k in range(1, s):
v = M * chain[k - 1]
chain.append(v)
# We want the chain leader appear as the last of the block.
chain.reverse()
s_chains.append(chain)
l_jordan_chains[s] = s_chains
jordan_block_structures[eigenval] = l_jordan_chains
return jordan_block_structures
def _jordan_split(self, algebraical, geometrical):
n1 = algebraical // geometrical
res = [n1] * geometrical
res[len(res) - 1] += algebraical % geometrical
assert sum(res) == algebraical
return res
def _setitem(self, key, value):
from .dense import Matrix
is_slice = isinstance(key, slice)
i, j = key = self.key2ij(key)
is_mat = isinstance(value, MatrixBase)
if type(i) is slice or type(j) is slice:
if is_mat:
self.copyin_matrix(key, value)
return
if not isinstance(value, Expr) and is_sequence(value):
self.copyin_list(key, value)
return
raise ValueError('unexpected value: %s' % value)
else:
if (not is_mat and
not isinstance(value, Basic) and is_sequence(value)):
value = Matrix(value)
is_mat = True
if is_mat:
if is_slice:
key = (slice(*divmod(i, self.cols)),
slice(*divmod(j, self.cols)))
else:
key = (slice(i, i + value.rows),
slice(j, j + value.cols))
self.copyin_matrix(key, value)
else:
return i, j, self._sympify(value)
return
def add(self, b):
return self + b
def adjugate(self, method="berkowitz"):
return self.cofactorMatrix(method).T
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
return PurePoly(list(map(simplify, self.berkowitz()[-1])), x)
def berkowitz_det(self):
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
poly = self.berkowitz()[-1]
sign = (-1) ** (len(poly) - 1)
return sign * poly[-1]
def berkowitz_eigenvals(self, **flags):
return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
def berkowitz_minors(self):
sign, minors = S.One, []
for poly in self.berkowitz():
minors.append(sign * poly[-1])
sign = -sign
return tuple(minors)
def berkowitz(self):
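        # Berkowitz's division-free algorithm: repeatedly apply Toeplitz
        # transforms T to accumulate the characteristic polynomial
        # coefficients of every leading principal submatrix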
from sympy.matrices import zeros
berk = ((1,),)
if not self:
return berk
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0] * (N - 1)
for n in range(N, 1, -1):
T, k = zeros(n + 1, n), n - 1
R, C = -A[k, :k], A[:k, k]
A, a = A[:k, :k], -A[k, k]
items = [C]
for i in range(0, n - 2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0, 0]
items = [S.One, a] + items
for i in range(n):
T[i:, i] = items[:n - i + 1]
transforms[k - 1] = T
polys = [self._new([S.One, -A[0, 0]])]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return berk + tuple(map(tuple, polys))
def cholesky_solve(self, rhs):
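        # solve self*x = rhs via the Cholesky factorization self = L*L.T;
        # a rectangular self is first reduced to the normal equations
        # (self.T*self)*x = self.T*rhs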
if self.is_symmetric():
L = self._cholesky()
elif self.rows >= self.cols:
L = (self.T * self)._cholesky()
rhs = self.T * rhs
else:
raise NotImplementedError('Under-determined System. '
'Try M.gauss_jordan_solve(rhs)')
Y = L._lower_triangular_solve(rhs)
return (L.T)._upper_triangular_solve(Y)
def cholesky(self):
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._cholesky()
def cofactor(self, i, j, method="berkowitz"):
if (i + j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1 * self.minorEntry(i, j, method)
def cofactorMatrix(self, method="berkowitz"):
out = self._new(self.rows, self.cols, lambda i, j:
self.cofactor(i, j, method))
return out
def columnspace(self, simplify=False):
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
# create a set of vectors for the basis
for i in range(self.cols):
if i in pivots:
basis.append(self.col(i))
return [self._new(b) for b in basis]
def condition_number(self):
if not self:
return S.Zero
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def as_real_imag(self):
        # split each entry into its real and imaginary parts
        from sympy.functions.elementary.complexes import im, re
        return (self.applyfunc(re), self.applyfunc(im))
def copy(self):
return self._new(self.rows, self.cols, self._mat)
def cross(self, b):
if not is_sequence(b):
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if not (self.rows * self.cols == b.rows * b.cols == 3):
raise ShapeError("Dimensions incorrect for cross product: %s x %s" %
((self.rows, self.cols), (b.rows, b.cols)))
else:
return self._new(self.rows, self.cols, (
(self[1] * b[2] - self[2] * b[1]),
(self[2] * b[0] - self[0] * b[2]),
(self[0] * b[1] - self[1] * b[0])))
@property
def D(self):
from sympy.physics.matrices import mgamma
if self.rows != 4:
            # In Python 3.2, properties can only raise AttributeError,
            # so we cannot raise a ShapeError here
            raise AttributeError
return self.H * mgamma(0)
def det_bareis(self):
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy().as_mutable(), self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0] * M[1, 1] - M[0, 1] * M[1, 0]
elif n == 3:
            det = (M[0, 0] * M[1, 1] * M[2, 2]
                   + M[0, 1] * M[1, 2] * M[2, 0]
                   + M[0, 2] * M[1, 0] * M[2, 1]) \
                - (M[0, 2] * M[1, 1] * M[2, 0]
                   + M[0, 0] * M[1, 2] * M[2, 1]
                   + M[0, 1] * M[1, 0] * M[2, 2])
else:
sign = 1
for k in range(n - 1):
if M[k, k] == 0:
for i in range(k + 1, n):
if M[i, k]:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
                # Bareiss' fraction-free form of the Gaussian elimination algorithm
for i in range(k + 1, n):
for j in range(k + 1, n):
D = M[k, k] * M[i, j] - M[i, k] * M[k, j]
if k > 0:
D /= M[k - 1, k - 1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign * M[n - 1, n - 1]
return det.expand()
def det_LU_decomposition(self):
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
p, prod = [], 1
l, u, p = M.LUdecomposition()
if len(p) % 2:
prod = -1
for k in range(n):
prod = prod * u[k, k] * l[k, k]
return prod.expand()
def det(self, method="bareis"):
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
elif method == "det_LU":
return self.det_LU_decomposition()
else:
raise ValueError("Determinant method '%s' unrecognized" % method)
def diagonal_solve(self, rhs):
if not self.is_diagonal:
raise TypeError("Matrix should be diagonal")
if rhs.rows != self.rows:
raise TypeError("Size mis-match")
return self._diagonal_solve(rhs)
def diagonalize(self, reals_only=False, sort=False, normalize=False):
from sympy.matrices import diag
if not self.is_square:
raise NonSquareMatrixError()
if not self.is_diagonalizable(reals_only, False):
self._diagonalize_clear_subproducts()
raise MatrixError("Matrix is not diagonalizable")
else:
if self._eigenvects is None:
self._eigenvects = self.eigenvects(simplify=True)
if sort:
self._eigenvects.sort(key=default_sort_key)
self._eigenvects.reverse()
diagvals = []
P = self._new(self.rows, 0, [])
for eigenval, multiplicity, vects in self._eigenvects:
for k in range(multiplicity):
diagvals.append(eigenval)
vec = vects[k]
if normalize:
vec = vec / vec.norm()
P = P.col_insert(P.cols, vec)
D = diag(*diagvals)
self._diagonalize_clear_subproducts()
return (P, D)
def diff(self, *args):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].diff(*args))
def dot(self, b):
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError(
"Dimensions incorrect for dot product: %s, %s" % (
self.shape, len(b)))
return self.dot(Matrix(b))
else:
raise TypeError(
"`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
if mat.cols == b.rows:
if b.cols != 1:
mat = mat.T
b = b.T
prod = flatten((mat * b).tolist())
if len(prod) == 1:
return prod[0]
return prod
if mat.cols == b.cols:
return mat.dot(b.T)
elif mat.rows == b.rows:
return mat.T.dot(b)
else:
raise ShapeError("Dimensions incorrect for dot product: %s, %s" % (
self.shape, b.shape))
def dual(self):
from sympy import LeviCivita
from sympy.matrices import zeros
M, n = self[:, :], self.rows
work = zeros(n)
if self.is_symmetric():
return work
for i in range(1, n):
for j in range(1, n):
acum = 0
for k in range(1, n):
acum += LeviCivita(i, j, 0, k) * M[0, k]
work[i, j] = acum
work[j, i] = -acum
for l in range(1, n):
acum = 0
for a in range(1, n):
for b in range(1, n):
acum += LeviCivita(0, l, a, b) * M[a, b]
acum /= 2
work[0, l] = -acum
work[l, 0] = acum
return work
def eigenvals(self, **flags):
# roots doesn't like Floats, so replace them with Rationals
mat = self
if not mat:
return {}
if flags.pop('rational', True):
if any(v.has(Float) for v in mat):
mat = mat._new(mat.rows, mat.cols,
[nsimplify(v, rational=True) for v in mat])
flags.pop('simplify', None)
return mat.berkowitz_eigenvals(**flags)
def eigenvects(self, **flags):
from sympy.matrices import eye
simplify = flags.get('simplify', True)
primitive = bool(flags.get('simplify', False))
chop = flags.pop('chop', False)
flags.pop('multiple', None)
# roots doesn't like Floats, so replace them with Rationals
float = False
mat = self
if any(v.has(Float) for v in self):
float = True
mat = mat._new(mat.rows, mat.cols, [nsimplify(
v, rational=True) for v in mat])
flags['rational'] = False
out, vlist = [], mat.eigenvals(**flags)
vlist = list(vlist.items())
vlist.sort(key=default_sort_key)
flags.pop('rational', None)
for r, k in vlist:
tmp = mat.as_mutable() - eye(mat.rows) * r
basis = tmp.nullspace()
if not basis:
basis = tmp.nullspace(simplify=simplify)
if not basis:
raise NotImplementedError(
"Can't evaluate eigenvector for eigenvalue %s" % r)
if primitive:
# the relationship A*e = lambda*e will still hold if we change the
# eigenvector; so if simplify is True we tidy up any normalization
                # artifacts with as_content_primitive (default) and remove any pure Integer
# denominators.
l = 1
for i, b in enumerate(basis[0]):
c, p = signsimp(b).as_content_primitive()
if c is not S.One:
b = c * p
l = ilcm(l, c.q)
basis[0][i] = b
if l != 1:
basis[0] *= l
if float:
out.append((r.evalf(chop=chop), k, [
mat._new(b).evalf(chop=chop) for b in basis]))
else:
out.append((r, k, [mat._new(b) for b in basis]))
return out
def exp(self):
if not self.is_square:
raise NonSquareMatrixError(
"Exponentiation is valid only for square matrices")
try:
P, cells = self.jordan_cells()
except MatrixError:
raise NotImplementedError(
"Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
def _jblock_exponential(b):
# This function computes the matrix exponential for one single Jordan block
nr = b.rows
l = b[0, 0]
if nr == 1:
res = exp(l)
else:
from sympy import eye
# extract the diagonal part
d = b[0, 0] * eye(nr)
# and the nilpotent part
n = b - d
# compute its exponential
nex = eye(nr)
for i in range(1, nr):
nex = nex + n ** i / factorial(i)
# combine the two parts
res = exp(b[0, 0]) * nex
return (res)
blocks = list(map(_jblock_exponential, cells))
from sympy.matrices import diag
eJ = diag(*blocks)
# n = self.rows
ret = P * eJ * P.inv()
return type(self)(ret)
def gauss_jordan_solve(self, b, freevar=False):
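        # reduce the augmented matrix [self | b] to rref and express the
        # solution as a particular solution plus free parameters
        # tau_0, tau_1, ... attached to the non-pivot columns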
from sympy.matrices import Matrix, zeros
aug = self.hstack(self.copy(), b.copy())
row, col = aug[:, :-1].shape
# solve by reduced row echelon form
A, pivots = aug.rref(simplify=True)
A, v = A[:, :-1], A[:, -1]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
A = A.vstack(A, permutation)
for i, c in enumerate(pivots):
A.col_swap(i, c)
A, permutation = A[:-1, :], A[-1, :]
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, 0].is_zero:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
free_var_index = permutation[
            len(pivots):]  # non-pivot columns are free variables
# Free parameters
dummygen = numbered_symbols("tau", Dummy)
tau = Matrix([next(dummygen) for k in range(col - rank)]).reshape(
col - rank, 1)
# Full parametric solution
V = A[:rank, rank:]
vt = v[:rank, 0]
free_sol = tau.vstack(vt - V * tau, tau)
# Undo permutation
sol = zeros(col, 1)
for k, v in enumerate(free_sol):
sol[permutation[k], 0] = v
if freevar:
return sol, tau, free_var_index
else:
return sol, tau
def get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
@classmethod
def hstack(cls, *args):
kls = type(args[0])
return reduce(kls.row_join, args)
def integrate(self, *args):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].integrate(*args))
def inv_mod(self, m):
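        # modular inverse via the adjugate: self**-1 = det**-1 * adj(self)
        # (mod m), with det**-1 computed from Euler's theorem as
        # pow(det, totient(m) - 1, m)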
from sympy.ntheory import totient
if not self.is_square:
raise NonSquareMatrixError()
N = self.cols
phi = totient(m)
det_K = self.det()
if gcd(det_K, m) != 1:
raise ValueError('Matrix is not invertible (mod %d)' % m)
det_inv = pow(int(det_K), int(phi - 1), int(m))
K_adj = self.cofactorMatrix().transpose()
K_inv = self.__class__(N, N,
[det_inv * K_adj[i, j] % m for i in range(N) for
j in range(N)])
return K_inv
def inverse_ADJ(self, iszerofunc=_iszero):
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = self.berkowitz_det()
zero = d.equals(0)
if zero is None:
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise ValueError("Matrix det == 0; not invertible.")
return self.adjugate() / d
def inverse_GE(self, iszerofunc=_iszero):
from .dense import Matrix
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self._new(red[:, big.rows:])
def inverse_LU(self, iszerofunc=_iszero):
if not self.is_square:
raise NonSquareMatrixError()
ok = self.rref(simplify=True)[0]
if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
raise ValueError("Matrix det == 0; not invertible.")
        return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inv(self, method=None, **kwargs):
if not self.is_square:
raise NonSquareMatrixError()
if method is not None:
kwargs['method'] = method
return self._eval_inverse(**kwargs)
def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
if not self.is_square:
return False
res = False
self._is_symbolic = self.is_symbolic()
self._is_symmetric = self.is_symmetric()
self._eigenvects = None
self._eigenvects = self.eigenvects(simplify=True)
all_iscorrect = True
for eigenval, multiplicity, vects in self._eigenvects:
if len(vects) != multiplicity:
all_iscorrect = False
break
elif reals_only and not eigenval.is_real:
all_iscorrect = False
break
res = all_iscorrect
if clear_subproducts:
self._diagonalize_clear_subproducts()
return res
def is_nilpotent(self):
if not self:
return True
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = Dummy('x')
if self.charpoly(x).args[0] == x ** self.rows:
return True
return False
def jacobian(self, X):
if not isinstance(X, MatrixBase):
X = self._new(X)
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def jordan_cell(self, eigenval, n):
n = int(n)
from sympy.matrices import MutableMatrix
out = MutableMatrix.zeros(n)
for i in range(n - 1):
out[i, i] = eigenval
out[i, i + 1] = 1
out[n - 1, n - 1] = eigenval
return type(self)(out)
def jordan_cells(self, calc_transformation=True):
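        # assemble one Jordan cell per chain (largest blocks first for
        # each eigenvalue) and stack the generalized eigenvectors as the
        # columns of the transformation matrix P; `calc_transformation`
        # is currently ignored, P is always computed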
n = self.rows
Jcells = []
Pcols_new = []
jordan_block_structures = self._jordan_block_structure()
from sympy.matrices import MutableMatrix
for eigenval in (
sorted(list(jordan_block_structures.keys()), key=default_sort_key)):
l_jordan_chains = jordan_block_structures[eigenval]
for s in reversed(sorted(
(l_jordan_chains).keys())):
s_chains = l_jordan_chains[s]
block = self.jordan_cell(eigenval, s)
number_of_s_chains = len(s_chains)
for i in range(0, number_of_s_chains):
Jcells.append(type(self)(block))
chain_vectors = s_chains[i]
lc = len(chain_vectors)
assert lc == s
for j in range(0, lc):
generalized_eigen_vector = chain_vectors[j]
Pcols_new.append(generalized_eigen_vector)
P = MutableMatrix.zeros(n)
for j in range(0, n):
P[:, j] = Pcols_new[j]
return type(self)(P), Jcells
def jordan_form(self, calc_transformation=True):
P, Jcells = self.jordan_cells()
from sympy.matrices import diag
J = diag(*Jcells)
return P, type(self)(J)
def key2bounds(self, keys):
islice, jslice = [isinstance(k, slice) for k in keys]
if islice:
if not self.rows:
rlo = rhi = 0
else:
rlo, rhi = keys[0].indices(self.rows)[:2]
else:
rlo = a2idx(keys[0], self.rows)
rhi = rlo + 1
if jslice:
if not self.cols:
clo = chi = 0
else:
clo, chi = keys[1].indices(self.cols)[:2]
else:
clo = a2idx(keys[1], self.cols)
chi = clo + 1
return rlo, rhi, clo, chi
def key2ij(self, key):
if is_sequence(key):
if not len(key) == 2:
raise TypeError('key must be a sequence of length 2')
return [a2idx(i, n) if not isinstance(i, slice) else i
for i, n in zip(key, self.shape)]
elif isinstance(key, slice):
return key.indices(len(self))[:2]
else:
return divmod(a2idx(key, len(self)), self.cols)
def LDLdecomposition(self):
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._LDLdecomposition()
def LDLsolve(self, rhs):
if self.is_symmetric():
L, D = self.LDLdecomposition()
elif self.rows >= self.cols:
L, D = (self.T * self).LDLdecomposition()
rhs = self.T * rhs
else:
raise NotImplementedError('Under-determined System. '
'Try M.gauss_jordan_solve(rhs)')
Y = L._lower_triangular_solve(rhs)
Z = D._diagonal_solve(Y)
return (L.T)._upper_triangular_solve(Z)
def left_eigenvects(self, **flags):
mat = self
left_transpose = mat.transpose().eigenvects(**flags)
left = []
for (ev, mult, ltmp) in left_transpose:
left.append((ev, mult, [l.transpose() for l in ltmp]))
return left
def limit(self, *args):
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].limit(*args))
def lower_triangular_solve(self, rhs):
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise ShapeError("Matrices size mismatch.")
if not self.is_lower:
raise ValueError("Matrix must be lower triangular.")
return self._lower_triangular_solve(rhs)
def LUdecomposition(self, iszerofunc=_iszero):
        combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i, j] = combined[i, j]
else:
if i == j:
L[i, i] = 1
U[i, j] = combined[i, j]
return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
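        # Doolittle-style elimination: L (unit diagonal implied, stored
        # below the diagonal) and U share a single matrix, and `p` records
        # the row swaps performed while pivoting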
if not self.is_square:
raise NonSquareMatrixError(
"A Matrix must be square to apply LUdecomposition_Simple().")
n = self.rows
A = self.as_mutable()
p = []
for j in range(n):
for i in range(j):
for k in range(i):
A[i, j] = A[i, j] - A[i, k] * A[k, j]
pivot = -1
for i in range(j, n):
for k in range(j):
A[i, j] = A[i, j] - A[i, k] * A[k, j]
if pivot == -1 and not iszerofunc(A[i, j]):
pivot = i
if pivot < 0:
                # the candidates need not be identically zero: the
                # supplied iszerofunc judged every one of them zero
raise ValueError("No nonzero pivot found; inversion failed.")
if pivot != j:
A.row_swap(pivot, j)
p.append([pivot, j])
scale = 1 / A[j, j]
for i in range(j + 1, n):
A[i, j] = A[i, j] * scale
return A, p
def LUdecompositionFF(self):
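        # fraction-free LU decomposition, returning P, L, DD, U with
        # P*self = L*DD**-1*U; useful to avoid fractions with symbolic
        # entries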
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = self.rows, self.cols
U, L, P = self.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L[k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot * Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk * U[i, j] - U[k, j] * Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
def LUsolve(self, rhs, iszerofunc=_iszero):
if rhs.rows != self.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
        A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm).as_mutable()
for i in range(n):
for j in range(i):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y * scale)
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y * scale)
scale = A[i, i]
b.row_op(i, lambda x, _: x / scale)
return rhs.__class__(b)
def minorEntry(self, i, j, method="berkowitz"):
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
return self.minorMatrix(i, j).det(method)
def minorMatrix(self, i, j):
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
M = self.as_mutable()
M.row_del(i)
M.col_del(j)
return self._new(M)
def multiply_elementwise(self, b):
from sympy.matrices import matrix_multiply_elementwise
return matrix_multiply_elementwise(self, b)
def multiply(self, b):
return self * b
def normalized(self):
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def norm(self, ord=None):
vals = list(self.values()) or [0]
if self.rows == 1 or self.cols == 1:
if ord == 2 or ord is None:
return sqrt(Add(*(abs(i) ** 2 for i in vals)))
elif ord == 1:
return Add(*(abs(i) for i in vals))
elif ord == S.Infinity:
return Max(*[abs(i) for i in vals])
elif ord == S.NegativeInfinity:
return Min(*[abs(i) for i in vals])
try:
return Pow(Add(*(abs(i) ** ord for i in vals)), S(1) / ord)
except (NotImplementedError, TypeError):
raise ValueError("Expected order to be Number, Symbol, oo")
else:
if ord == 2:
return Max(*self.singular_values())
elif ord == -2:
return Min(*self.singular_values())
elif (ord is None or isinstance(ord,
string_types) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def nullspace(self, simplify=False):
from sympy.matrices import zeros
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
for i in range(self.cols - len(pivots)):
basis.append(zeros(self.cols, 1))
basiskey, cur = [-1] * len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots:
basis[basiskey.index(i)][i, 0] = 1
            else:  # add negative of nonpivot entry to corresponding vector
for j in range(i + 1, self.cols):
line = pivots.index(i)
v = reduced[line, j]
if simplify:
v = simpfunc(v)
if v:
if j in pivots:
# XXX: Is this the correct error?
raise NotImplementedError(
"Could not compute the nullspace of `self`.")
basis[basiskey.index(j)][i, 0] = -v
return [self._new(b) for b in basis]
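    # Usage sketch (illustrative, not part of the original source): every
    # vector returned by ``nullspace`` is annihilated by the matrix:
    #
    #     >>> from sympy import Matrix
    #     >>> M = Matrix([[1, 2], [2, 4]])
    #     >>> v, = M.nullspace()
    #     >>> M * v == Matrix([0, 0])
    #     True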
def permuteBkwd(self, perm):
copy = self.copy()
for i in range(len(perm) - 1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
copy = self.copy()
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def pinv_solve(self, B, arbitrary_matrix=None):
from sympy.matrices import eye
A = self
A_pinv = self.pinv()
if arbitrary_matrix is None:
rows, cols = A.cols, B.cols
w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
arbitrary_matrix = self.__class__(cols, rows, w).T
return A_pinv * B + (eye(A.cols) - A_pinv * A) * arbitrary_matrix
def pinv(self):
A = self
AH = self.H
# Trivial case: pseudoinverse of all-zero matrix is its transpose.
if A.is_zero:
return AH
try:
if self.rows >= self.cols:
return (AH * A).inv() * AH
else:
return AH * (A * AH).inv()
except ValueError:
# Matrix is not full rank, so A*AH cannot be inverted.
raise NotImplementedError('Rank-deficient matrices are not yet '
'supported.')
def print_nonzero(self, symb="X"):
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print('\n'.join(s))
def project(self, v):
return v * (self.dot(v) / v.dot(v))
def QRdecomposition(self):
cls = self.__class__
mat = self.as_mutable()
if not mat.rows >= mat.cols:
raise MatrixError(
"The number of rows must be greater than columns")
n = mat.rows
m = mat.cols
rank = n
row_reduced = mat.rref()[0]
for i in range(row_reduced.rows):
if row_reduced.row(i).norm() == 0:
rank -= 1
if not rank == mat.cols:
raise MatrixError("The rank of the matrix must match the columns")
Q, R = mat.zeros(n, m), mat.zeros(m)
for j in range(m): # for each column vector
tmp = mat[:, j] # take original v
for i in range(j):
# subtract the project of mat on new vector
tmp -= Q[:, i] * mat[:, j].dot(Q[:, i])
tmp.expand()
# normalize it
R[j, j] = tmp.norm()
Q[:, j] = tmp / R[j, j]
if Q[:, j].norm() != 1:
raise NotImplementedError(
"Could not normalize the vector %d." % j)
for i in range(j):
R[i, j] = Q[:, i].dot(mat[:, j])
return cls(Q), cls(R)
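    # Usage sketch (illustrative, not part of the original source): the
    # factors are exact and ``Q`` has orthonormal columns:
    #
    #     >>> from sympy import Matrix, eye
    #     >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
    #     >>> Q, R = A.QRdecomposition()
    #     >>> Q * R == A
    #     True
    #     >>> Q.T * Q == eye(3)
    #     True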
def QRsolve(self, b):
Q, R = self.as_mutable().QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n - 1, -1, -1):
tmp = y[j, :]
for k in range(j + 1, n):
tmp -= R[j, k] * x[n - 1 - k]
x.append(tmp / R[j, j])
return self._new([row._mat for row in reversed(x)])
def rank(self, iszerofunc=_iszero, simplify=False):
row_reduced = self.rref(iszerofunc=iszerofunc, simplify=simplify)
rank = len(row_reduced[-1])
return rank
def refine(self, assumptions=True):
return self.applyfunc(lambda x: refine(x, assumptions))
def replace(self, F, G, map=False):
M = self[:, :]
return M.applyfunc(lambda x: x.replace(F, G, map))
def rref(self, iszerofunc=_iszero, simplify=False):
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
# pivot: index of next row to contain a pivot
pivot, r = 0, self.as_mutable()
# pivotlist: indices of pivot variables (non-free)
pivotlist = []
for i in range(r.cols):
if pivot == r.rows:
break
if simplify:
r[pivot, i] = simpfunc(r[pivot, i])
pivot_offset, pivot_val, assumed_nonzero, newly_determined = _find_reasonable_pivot(
r[pivot:, i], iszerofunc, simpfunc)
# `_find_reasonable_pivot` may have simplified
# some elements along the way. If they were simplified
# and then determined to be either zero or non-zero for
# sure, they are stored in the `newly_determined` list
for (offset, val) in newly_determined:
r[pivot + offset, i] = val
# if `pivot_offset` is None, this column has no
# pivot
if pivot_offset is None:
continue
# swap the pivot column into place
pivot_pos = pivot + pivot_offset
r.row_swap(pivot, pivot_pos)
r.row_op(pivot, lambda x, _: x / pivot_val)
for j in range(r.rows):
if j == pivot:
continue
pivot_val = r[j, i]
r.zip_row_op(j, pivot, lambda x, y: x - pivot_val * y)
pivotlist.append(i)
pivot += 1
return self._new(r), pivotlist
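    # Usage sketch (illustrative, not part of the original source): ``rref``
    # returns the reduced matrix together with the pivot column indices:
    #
    #     >>> from sympy import Matrix
    #     >>> reduced, pivots = Matrix([[1, 2, 1], [2, 4, 0]]).rref()
    #     >>> pivots
    #     [0, 2]
    #     >>> reduced == Matrix([[1, 2, 0], [0, 0, 1]])
    #     True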
@property
def shape(self):
return (self.rows, self.cols)
def simplify(self, ratio=1.7, measure=count_ops):
return self.applyfunc(lambda x: x.simplify(ratio, measure))
def singular_values(self):
mat = self.as_mutable()
# Compute eigenvalues of A.H A
valmultpairs = (mat.H * mat).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
vals += [sqrt(k)] * v # dangerous! same k in several spots!
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
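    # Usage sketch (illustrative, not part of the original source): the
    # values come back sorted in descending order:
    #
    #     >>> from sympy import Matrix
    #     >>> Matrix([[2, 0], [0, 3]]).singular_values()
    #     [3, 2]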
def solve_least_squares(self, rhs, method='CH'):
if method == 'CH':
return self.cholesky_solve(rhs)
t = self.T
return (t * self).inv(method=method) * t * rhs
def solve(self, rhs, method='GE'):
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system. '
'Try M.gauss_jordan_solve(rhs)')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method) * rhs
def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
colsep=', ', align='right'):
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i, j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': 'ljust',
'right': 'rjust',
'center': 'center',
'<': 'ljust',
'>': 'rjust',
'^': 'center',
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = getattr(elem, align)(maxlen[j])
res[i] = rowstart + colsep.join(row) + rowend
return rowsep.join(res)
__matmul__ = __mul__
__rmatmul__ = __rmul__
def upper_triangular_solve(self, rhs):
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise TypeError("Matrix size mismatch.")
if not self.is_upper:
raise TypeError("Matrix is not upper triangular.")
return self._upper_triangular_solve(rhs)
def vech(self, diagonal=True, check_symmetry=True):
from sympy.matrices import zeros
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError(
"Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros(c * (c + 1) // 2, 1)
for j in range(c):
for i in range(j, c):
v[count] = self[i, j]
count += 1
else:
v = zeros(c * (c - 1) // 2, 1)
for j in range(c):
for i in range(j + 1, c):
v[count] = self[i, j]
count += 1
return v
@classmethod
def vstack(cls, *args):
kls = type(args[0])
return reduce(kls.col_join, args)
_eval_simplify = simplify
charpoly = berkowitz_charpoly
def classof(A, B):
try:
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
except Exception:
pass
try:
import numpy
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
except Exception:
pass
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
def a2idx(j, n=None):
if type(j) is not int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (j,))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j,))
return int(j)
def _find_reasonable_pivot(col, iszerofunc=_iszero, simpfunc=_simplify):
newly_determined = []
col = list(col)
# a column that contains a mix of floats and integers
# but at least one float is considered a numerical
# column, and so we do partial pivoting
if all(isinstance(x, (Float, Integer)) for x in col) and any(
isinstance(x, Float) for x in col):
col_abs = [abs(x) for x in col]
max_value = max(col_abs)
if iszerofunc(max_value):
            # just because iszerofunc returned True, doesn't
            # mean the value is numerically zero; make sure
            # to replace all entries with numerical zeros
if max_value != 0:
newly_determined = [(i, 0) for i, x in enumerate(col) if x != 0]
return (None, None, False, newly_determined)
index = col_abs.index(max_value)
return (index, col[index], False, newly_determined)
    # PASS 1 (apply iszerofunc directly to each element)
    possible_zeros = []
for i, x in enumerate(col):
is_zero = iszerofunc(x)
if is_zero == False:
return (i, x, False, newly_determined)
possible_zeros.append(is_zero)
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 2 (iszerofunc after simplify)
    # we haven't found any for-sure non-zeros, so
    # go through the elements iszerofunc couldn't
    # make a determination about and opportunistically
    # simplify to see if we find something
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
simped = simpfunc(x)
is_zero = iszerofunc(simped)
if is_zero == True or is_zero == False:
newly_determined.append((i, simped))
if is_zero == False:
return (i, simped, False, newly_determined)
possible_zeros[i] = is_zero
    # after simplifying, entries that previously could not be
    # classified may now be known zeros
if all(possible_zeros):
# if everything is definitely zero, we have
# no pivot
return (None, None, False, newly_determined)
# PASS 3 (.equals(0))
# some expressions fail to simplify to zero, but
# `.equals(0)` evaluates to True. As a last-ditch
# attempt, apply `.equals` to these expressions
for i, x in enumerate(col):
if possible_zeros[i] is not None:
continue
if x.equals(S.Zero):
# `.iszero` may return False with
# an implicit assumption (e.g., `x.equals(0)`
# when `x` is a symbol), so only treat it
# as proved when `.equals(0)` returns True
possible_zeros[i] = True
newly_determined.append((i, S.Zero))
if all(possible_zeros):
return (None, None, False, newly_determined)
# at this point there is nothing that could definitely
# be a pivot. To maintain compatibility with existing
    # behavior, we'll assume that an illdetermined thing is
    # non-zero. We should probably raise a warning in this case
i = possible_zeros.index(None)
return (i, col[i], True, newly_determined)
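# Illustrative sketch of the contract above (not part of the original source):
# the helper reports ``(pivot_offset, pivot_value, assumed_nonzero,
# newly_determined)`` for a column of candidate pivots:
#
#     >>> from sympy import S
#     >>> _find_reasonable_pivot([S.Zero, S(3), S(5)])
#     (1, 3, False, [])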
class _MinimalMatrix(object):
is_MatrixLike = True
_sympify = staticmethod(sympify)
_class_priority = 3
is_Matrix = True
is_MatrixExpr = False
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __init__(self, rows, cols=None, mat=None):
if isinstance(mat, FunctionType):
mat = list(mat(i, j) for i in range(rows) for j in range(cols))
try:
if cols is None and mat is None:
mat = rows
rows, cols = mat.shape
except AttributeError:
pass
try:
if cols is None and mat is None:
mat = rows
cols = len(mat[0])
rows = len(mat)
mat = [x for l in mat for x in l]
except (IndexError, TypeError):
pass
self.mat = tuple(self._sympify(x) for x in mat)
self.rows, self.cols = rows, cols
if self.rows is None or self.cols is None:
raise NotImplementedError("Cannot initialize matrix with given parameters")
def __getitem__(self, key):
def _normalize_slices(row_slice, col_slice):
if not isinstance(row_slice, slice):
row_slice = slice(row_slice, row_slice + 1, None)
row_slice = slice(*row_slice.indices(self.rows))
if not isinstance(col_slice, slice):
col_slice = slice(col_slice, col_slice + 1, None)
col_slice = slice(*col_slice.indices(self.cols))
return (row_slice, col_slice)
def _coord_to_index(i, j):
return i * self.cols + j
if isinstance(key, tuple):
i, j = key
if isinstance(i, slice) or isinstance(j, slice):
i, j = _normalize_slices(i, j)
rowsList, colsList = list(range(self.rows))[i], \
list(range(self.cols))[j]
indices = (i * self.cols + j for i in rowsList for j in
colsList)
return self._new(len(rowsList), len(colsList),
list(self.mat[i] for i in indices))
# if the key is a tuple of ints, change
# it to an array index
key = _coord_to_index(i, j)
return self.mat[key]
def __eq__(self, other):
return self.shape == other.shape and list(self) == list(other)
def __len__(self):
return self.rows*self.cols
def __repr__(self):
return "_MinimalMatrix({}, {}, {})".format(self.rows, self.cols,
self.mat)
@property
def shape(self):
return (self.rows, self.cols)
# junction/devices/admin.py (theSage21/junction, MIT)
# -*- coding: utf-8 -*-
from django.contrib import admin
from junction.base.admin import TimeAuditAdmin
from .models import Device
# Register your models here.
class DeviceAdmin(TimeAuditAdmin):
list_display = (
"uuid",
"verification_code",
"verification_code_sent_at",
) + TimeAuditAdmin.list_display
admin.site.register(Device, DeviceAdmin)
# t2t_bert/utils/data_ops/dataset_ops.py (yyht/bert, Apache-2.0)
from utils.data_ops import structure as structure_lib
def get_structure(dataset_or_iterator):
"""Returns the `tf.data.experimental.Structure` of a `Dataset` or `Iterator`.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A `tf.data.experimental.Structure` representing the structure of the
elements of `dataset_or_iterator`.
Raises:
TypeError: If `dataset_or_iterator` is not a dataset or iterator object.
"""
try:
ret = dataset_or_iterator._element_structure # pylint: disable=protected-access
if isinstance(ret, structure_lib.Structure):
return ret
except AttributeError:
pass
raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator object, "
"but got %s." % type(dataset_or_iterator))
def get_legacy_output_shapes(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator`.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_shapes` property.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of the given dataset or iterator.
"""
return get_structure(dataset_or_iterator)._to_legacy_output_shapes() # pylint: disable=protected-access
def get_legacy_output_types(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator`.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_types` property.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return get_structure(dataset_or_iterator)._to_legacy_output_types() # pylint: disable=protected-access
def get_legacy_output_classes(dataset_or_iterator):
"""Returns the output classes of a `Dataset` or `Iterator`.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_classes` property.
Args:
dataset_or_iterator: A `tf.data.Dataset`, `tf.compat.v1.data.Iterator`, or
`IteratorV2`.
Returns:
A nested structure of Python `type` or `tf.data.experimental.Structure`
objects corresponding to each component of an element of this dataset.
"""
    return get_structure(dataset_or_iterator)._to_legacy_output_classes()  # pylint: disable=protected-access
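# Usage sketch (illustrative, not part of the original source; assumes a
# TF 1.x-style dataset exposing ``_element_structure``):
#
#     >>> import tensorflow as tf
#     >>> ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
#     >>> get_legacy_output_types(ds)    # expected: tf.int32
#     >>> get_legacy_output_shapes(ds)   # expected: TensorShape([])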
f7fdc2f6f7ee603e52d2b7d7c42ebf21cbf52d56 | 3,360 | py | Python | src/zope/browserpage/viewpagetemplatefile.py | zopefoundation/zope.browserpage | 0965ae7606eebdc6b997da46a8b065a1e4afd8ba | [
"ZPL-2.1"
] | null | null | null | src/zope/browserpage/viewpagetemplatefile.py | zopefoundation/zope.browserpage | 0965ae7606eebdc6b997da46a8b065a1e4afd8ba | [
"ZPL-2.1"
] | 5 | 2016-03-24T07:52:49.000Z | 2020-10-08T15:14:26.000Z | src/zope/browserpage/viewpagetemplatefile.py | zopefoundation/zope.browserpage | 0965ae7606eebdc6b997da46a8b065a1e4afd8ba | [
"ZPL-2.1"
] | 1 | 2015-04-03T08:17:11.000Z | 2015-04-03T08:17:11.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""File-based page templates that can be used as methods on views.
"""
__docformat__ = 'restructuredtext'
from zope.component import getMultiAdapter
from zope.pagetemplate.engine import TrustedAppPT
from zope.pagetemplate.pagetemplatefile import PageTemplateFile
class ViewPageTemplateFile(TrustedAppPT, PageTemplateFile):
"""Page Templates used as methods of views defined as Python classes.
"""
def __init__(self, filename, _prefix=None, content_type=None):
_prefix = self.get_path_from_prefix(_prefix)
super(ViewPageTemplateFile, self).__init__(filename, _prefix)
if content_type is not None:
self.content_type = content_type
def pt_getContext(self, instance, request, **_kw):
# instance is a View component
namespace = super(ViewPageTemplateFile, self).pt_getContext(**_kw)
namespace['request'] = request
namespace['view'] = instance
namespace['context'] = context = instance.context
namespace['views'] = ViewMapper(context, request)
return namespace
def __call__(self, instance, *args, **keywords):
namespace = self.pt_getContext(
request=instance.request,
instance=instance, args=args, options=keywords)
debug_flags = instance.request.debug
s = self.pt_render(
namespace,
showtal=getattr(debug_flags, 'showTAL', 0),
sourceAnnotations=getattr(debug_flags, 'sourceAnnotations', 0),
)
response = instance.request.response
if not response.getHeader("Content-Type"):
response.setHeader("Content-Type", self.content_type)
return s
def __get__(self, instance, type):
return BoundPageTemplate(self, instance)
class ViewMapper(object):
def __init__(self, ob, request):
self.ob = ob
self.request = request
def __getitem__(self, name):
return getMultiAdapter((self.ob, self.request), name=name)
class BoundPageTemplate(object):
def __init__(self, pt, ob):
object.__setattr__(self, '__func__', pt)
object.__setattr__(self, '__self__', ob)
macros = property(lambda self: self.__func__.macros)
filename = property(lambda self: self.__func__.filename)
def __call__(self, *args, **kw):
if self.__self__ is None:
im_self, args = args[0], args[1:]
else:
im_self = self.__self__
return self.__func__(im_self, *args, **kw)
def __setattr__(self, name, v):
raise AttributeError("Can't set attribute", name)
def __repr__(self):
return "<BoundPageTemplateFile of %r>" % self.__self__
def NoTraverser(ob, request):
return None
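# Usage sketch (illustrative, not part of the original source): the template
# is normally bound as a class attribute, so ``__get__`` above hands back a
# ``BoundPageTemplate`` whose ``__call__`` renders with the view instance;
# 'hello.pt' below is a hypothetical template file:
#
#     class HelloView(object):
#         index = ViewPageTemplateFile('hello.pt')
#
#         def __init__(self, context, request):
#             self.context = context
#             self.request = request
#
#     # rendering: HelloView(context, request).index()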
# PapMinPy/author.py (KKGanguly/PapMinPy, MIT)
import json
class Author:
surname=""
givenName=""
def toJSON(self):
return dict(surname=self.surname,givenName=self.givenName)
# scout/commands/export/variant.py (mhkc/scout, BSD-3-Clause)
import os
import click
import logging
import datetime
from flask.cli import with_appcontext
from bson.json_util import dumps
from xlsxwriter import Workbook
from scout.export.variant import export_variants, export_verified_variants
from .utils import json_option
from scout.constants import CALLERS
from scout.constants.variants_export import VCF_HEADER, VERIFIED_VARIANTS_HEADER
from scout.server.extensions import store
LOG = logging.getLogger(__name__)
@click.command("verified", short_help="Export validated variants")
@click.option(
"-c",
"--collaborator",
help="Specify what collaborator to export variants from. Defaults to cust000",
)
@click.option("--outpath", help="Path to output file")
@click.option("--test", help="Use this flag to test the function", is_flag=True)
@with_appcontext
def verified(collaborator, test, outpath=None):
"""Export variants which have been verified for an institute
and write them to an excel file.
Args:
collaborator(str): institute id
test(bool): True if the function is called for testing purposes
outpath(str): path to output file
Returns:
written_files(int): number of written or simulated files
"""
written_files = 0
collaborator = collaborator or "cust000"
LOG.info("Exporting verified variants for cust {}".format(collaborator))
adapter = store
verified_vars = adapter.verified(institute_id=collaborator)
LOG.info("FOUND {} verified variants for institute {}".format(len(verified_vars), collaborator))
if not verified_vars:
LOG.warning(
"There are no verified variants for institute {} in database!".format(collaborator)
)
return None
unique_callers = set()
for var_type, var_callers in CALLERS.items():
for caller in var_callers:
unique_callers.add(caller.get("id"))
document_lines = export_verified_variants(verified_vars, unique_callers)
today = datetime.datetime.now().strftime("%Y-%m-%d")
document_name = ".".join(["verified_variants", collaborator, today]) + ".xlsx"
    # If this was a test and lines were created, return success
if test and document_lines:
written_files += 1
LOG.info("Success. Verified variants file contains {} lines".format(len(document_lines)))
return written_files
if test:
LOG.info(
"Could not create document lines. Verified variants not found for customer {}".format(
collaborator
)
)
return
    # set up outfolder
    if not outpath:
        outpath = str(os.getcwd())
    # create workbook and new sheet
    workbook = Workbook(os.path.join(outpath, document_name))
Report_Sheet = workbook.add_worksheet()
# Write the column header
row = 0
for col, field in enumerate(VERIFIED_VARIANTS_HEADER):
Report_Sheet.write(row, col, field)
# Write variant lines, after header (start at line 1)
for row, line in enumerate(document_lines, 1): # each line becomes a row in the document
for col, field in enumerate(line): # each field in line becomes a cell
Report_Sheet.write(row, col, field)
workbook.close()
if os.path.exists(os.path.join(outpath, document_name)):
LOG.info(
"Success. Verified variants file of {} lines was written to disk".format(
len(document_lines)
)
)
written_files += 1
return written_files
@click.command("variants", short_help="Export variants")
@click.option(
"-c",
"--collaborator",
help="Specify what collaborator to export variants from. Defaults to cust000",
)
@click.option("-d", "--document-id", help="Search for a specific variant")
@click.option("--case-id", help="Find causative variants for case")
@json_option
@with_appcontext
def variants(collaborator, document_id, case_id, json):
"""Export causatives for a collaborator in .vcf format"""
LOG.info("Running scout export variants")
adapter = store
collaborator = collaborator or "cust000"
variants = export_variants(adapter, collaborator, document_id=document_id, case_id=case_id)
if json:
click.echo(dumps([var for var in variants]))
return
vcf_header = VCF_HEADER
# If case_id is given, print more complete vcf entries, with INFO,
# and genotypes
if case_id:
vcf_header[-1] = vcf_header[-1] + "\tFORMAT"
case_obj = adapter.case(case_id=case_id)
for individual in case_obj["individuals"]:
vcf_header[-1] = vcf_header[-1] + "\t" + individual["individual_id"]
# print header
for line in vcf_header:
click.echo(line)
for variant_obj in variants:
variant_string = get_vcf_entry(variant_obj, case_id=case_id)
click.echo(variant_string)
def get_vcf_entry(variant_obj, case_id=None):
"""
Get vcf entry from variant object
Args:
variant_obj(dict)
Returns:
variant_string(str): string representing variant in vcf format
"""
if variant_obj["category"] == "snv":
var_type = "TYPE"
else:
var_type = "SVTYPE"
info_field = ";".join(
[
"END=" + str(variant_obj["end"]),
var_type + "=" + variant_obj["sub_category"].upper(),
]
)
variant_string = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}".format(
variant_obj["chromosome"],
variant_obj["position"],
variant_obj["dbsnp_id"],
variant_obj["reference"],
variant_obj["alternative"],
variant_obj["quality"],
";".join(variant_obj["filters"]),
info_field,
)
if case_id:
variant_string += "\tGT"
for sample in variant_obj["samples"]:
variant_string += "\t" + sample["genotype_call"]
return variant_string
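# Usage sketch (illustrative; the field values below are assumptions about
# the stored variant documents, not part of the original source):
#
#     >>> variant = {
#     ...     "category": "snv", "sub_category": "snv", "chromosome": "1",
#     ...     "position": 100, "end": 100, "dbsnp_id": "rs1",
#     ...     "reference": "A", "alternative": "T", "quality": 50.0,
#     ...     "filters": ["PASS"],
#     ... }
#     >>> get_vcf_entry(variant)
#     '1\t100\trs1\tA\tT\t50.0\tPASS\tEND=100;TYPE=SNV'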
# pandas/io/sql.py (tyuyoshi/pandas; PSF-2.0, Apache-2.0, BSD-3-Clause, MIT and others)
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import annotations
from contextlib import contextmanager
from datetime import (
date,
datetime,
time,
)
from functools import partial
import re
from typing import (
Any,
Iterator,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import DtypeArg
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_dict_like,
is_list_like,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas import get_option
from pandas.core.api import (
DataFrame,
Series,
)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.tools.datetimes import to_datetime
from pandas.util.version import Version
class DatabaseError(OSError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
def _gt14() -> bool:
"""
Check if sqlalchemy.__version__ is at least 1.4.0, when several
deprecations were made.
"""
import sqlalchemy
return Version(sqlalchemy.__version__) >= Version("1.4.0")
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(
col, utc: bool | None = None, format: str | dict[str, Any] | None = None
):
if isinstance(format, dict):
# GH35185 Allow custom error values in parse_dates argument of
# read_sql like functions.
# Format can take on custom to_datetime argument values such as
# {"errors": "coerce"} or {"dayfirst": True}
error = format.pop("errors", None) or "ignore"
return to_datetime(col, errors=error, **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(
data,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
if dtype:
frame = frame.astype(dtype)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
pandas_sql = pandasSQL_builder(con)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
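# Usage sketch (illustrative, not part of the original source):
#
#     >>> import sqlite3
#     >>> conn = sqlite3.connect(":memory:")
#     >>> _ = execute("CREATE TABLE t (a INTEGER)", conn)
#     >>> _ = execute("INSERT INTO t VALUES (?)", conn, params=[1])
#     >>> execute("SELECT a FROM t", conn).fetchall()
#     [(1,)]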
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=...,
index_col=...,
coerce_float=...,
parse_dates=...,
columns=...,
chunksize: None = ...,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
con,
schema=...,
index_col=...,
coerce_float=...,
parse_dates=...,
columns=...,
chunksize: int = ...,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name: str,
con,
schema: str | None = None,
index_col: str | Sequence[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
chunksize: int | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
A database URI could be provided as str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
if not pandas_sql.has_table(table_name):
raise ValueError(f"Table {table_name} not found")
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
@overload
def read_sql_query(
sql,
con,
index_col=...,
coerce_float=...,
params=...,
parse_dates=...,
chunksize: None = ...,
dtype: DtypeArg | None = ...,
) -> DataFrame:
...
@overload
def read_sql_query(
sql,
con,
index_col=...,
coerce_float=...,
params=...,
parse_dates=...,
chunksize: int = ...,
dtype: DtypeArg | None = ...,
) -> Iterator[DataFrame]:
...
def read_sql_query(
sql,
con,
index_col=None,
coerce_float: bool = True,
params=None,
parse_dates=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}.
.. versionadded:: 1.3.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
dtype=dtype,
)
@overload
def read_sql(
sql,
con,
index_col=...,
coerce_float=...,
params=...,
parse_dates=...,
columns=...,
chunksize: None = ...,
) -> DataFrame:
...
@overload
def read_sql(
sql,
con,
index_col=...,
coerce_float=...,
params=...,
parse_dates=...,
columns=...,
chunksize: int = ...,
) -> Iterator[DataFrame]:
...
def read_sql(
sql,
con,
index_col: str | Sequence[str] | None = None,
coerce_float: bool = True,
params=None,
parse_dates=None,
columns=None,
chunksize: int | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable; str
connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
Read data from SQL via either a SQL query or a SQL tablename.
When using a SQLite database only SQL queries are accepted,
providing only the SQL tablename will result in an error.
>>> from sqlite3 import connect
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
... columns=['int_column', 'date_column'])
>>> df.to_sql('test_data', conn)
2
>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
int_column date_column
0 0 10/11/12
1 1 12/11/10
>>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
Apply date parsing to columns through the ``parse_dates`` argument
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates=["date_column"])
int_column date_column
0 0 2012-10-11
1 1 2010-12-11
The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
Custom argument values for applying ``pd.to_datetime`` on a column are specified
via a dictionary format:
1. Ignore errors while parsing the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"errors": "ignore"}})
int_column date_column
0 0 2012-10-11
1 1 2010-12-11
2. Apply a dayfirst date parsing order on the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"dayfirst": True}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
3. Apply custom formatting when date parsing the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"format": "%d/%m/%y"}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(bind=pandas_sql.connectable, only=[sql])
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
def to_sql(
frame,
name: str,
con,
schema: str | None = None,
if_exists: str = "fail",
index: bool = True,
index_label=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
method: str | None = None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame, Series
name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : bool, default True
Write DataFrame index as a column.
index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 fallback mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
- ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
- callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
engine : {'auto', 'sqlalchemy'}, default 'auto'
SQL engine library to use. If 'auto', then the option
``io.sql.engine`` is used. The default ``io.sql.engine``
behavior is 'sqlalchemy'
.. versionadded:: 1.3.0
**engine_kwargs
Any additional kwargs are passed to the engine.
Returns
-------
None or int
Number of rows affected by to_sql. None is returned if the callable
passed into ``method`` does not return the number of rows.
.. versionadded:: 1.4.0
Notes
-----
The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor``
or SQLAlchemy connectable. The returned value may not reflect the exact number of written
rows as stipulated in the
`sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or
`SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__
""" # noqa:E501
if if_exists not in ("fail", "replace", "append"):
raise ValueError(f"'{if_exists}' is not valid for if_exists")
pandas_sql = pandasSQL_builder(con, schema=schema)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
"'frame' argument should be either a Series or a DataFrame"
)
return pandas_sql.to_sql(
frame,
name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
chunksize=chunksize,
dtype=dtype,
method=method,
engine=engine,
**engine_kwargs,
)
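# Usage sketch (illustrative, not part of the original source):
#
#     >>> import sqlite3
#     >>> conn = sqlite3.connect(":memory:")
#     >>> df = DataFrame({"a": [1, 2]})
#     >>> to_sql(df, "demo", conn)    # number of rows written
#     2
#     >>> read_sql("SELECT * FROM demo", conn).shape
#     (2, 2)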
def has_table(table_name: str, con, schema: str | None = None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
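# Usage sketch (illustrative, not part of the original source; continues the
# ``to_sql`` example above):
#
#     >>> has_table("demo", conn)
#     True
#     >>> has_table("missing", conn)
#     False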
def pandasSQL_builder(con, schema: str | None = None):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
import sqlite3
if isinstance(con, sqlite3.Connection) or con is None:
return SQLiteDatabase(con)
sqlalchemy = import_optional_dependency("sqlalchemy")
if isinstance(con, str):
con = sqlalchemy.create_engine(con)
if isinstance(con, sqlalchemy.engine.Connectable):
return SQLDatabase(con, schema=schema)
raise ValueError(
"pandas only support SQLAlchemy connectable(engine/connection) or"
"database string URI or sqlite3 DBAPI2 connection"
)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name: str,
pandas_sql_engine,
frame=None,
index=True,
if_exists="fail",
prefix="pandas",
index_label=None,
schema=None,
keys=None,
dtype: DtypeArg | None = None,
):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
if _gt14():
self.table = self.table.to_metadata(self.pd_sql.meta)
else:
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create(bind=self.pd_sql.connectable)
def create(self):
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys: list[str], data_iter) -> int:
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
result = conn.execute(self.table.insert(), data)
return result.rowcount
def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:
"""
        Alternative to _execute_insert for DBs that support multi-value INSERT.
        Note: a multi-value insert is usually faster for analytics DBs and
        tables containing few columns, but performance degrades quickly as
        the number of columns increases.
"""
from sqlalchemy import insert
data = [dict(zip(keys, row)) for row in data_iter]
stmt = insert(self.table).values(data)
result = conn.execute(stmt)
return result.rowcount
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
for i, (_, ser) in enumerate(temp.items()):
vals = ser._values
if vals.dtype.kind == "M":
d = vals.to_pydatetime()
elif vals.dtype.kind == "m":
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = vals.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
# error: No overload variant of "__setitem__" of "list" matches
# argument types "int", "ndarray"
data_list[i] = d # type: ignore[call-overload]
return column_names, data_list
def insert(
self, chunksize: int | None = None, method: str | None = None
) -> int | None:
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return 0
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = (nrows // chunksize) + 1
total_inserted = 0
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
num_inserted = exec_insert(conn, keys, chunk_iter)
if num_inserted is None:
total_inserted = None
else:
total_inserted += num_inserted
return total_inserted
def _query_iterator(
self,
result,
        chunksize: int | None,
columns,
coerce_float: bool = True,
parse_dates=None,
):
"""Return generator through chunked result set."""
has_read_data = False
while True:
data = result.fetchmany(chunksize)
if not data:
if not has_read_data:
yield DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
break
else:
has_read_data = True
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
from sqlalchemy import select
if columns is not None and len(columns) > 0:
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(*cols) if _gt14() else select(cols)
else:
sql_select = select(self.table) if _gt14() else self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
else:
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return com.fill_missing_names(self.frame.index.names)
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import (
Column,
PrimaryKeyConstraint,
Table,
)
from sqlalchemy.schema import MetaData
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
        # At this point, attach the table to a fresh MetaData object; only
        # attach it to self.meta once the table has actually been created.
meta = MetaData()
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (
col_type is datetime
or col_type is date
or col_type is DatetimeTZDtype
):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype("int64") or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
TIMESTAMP,
BigInteger,
Boolean,
Date,
DateTime,
Float,
Integer,
SmallInteger,
Text,
Time,
)
if col_type == "datetime64" or col_type == "datetime":
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
if col.dt.tz is not None:
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
# GH 26761 or an Index with date-like data e.g. 9999-01-01
if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=find_stack_level(),
)
return BigInteger
elif col_type == "floating":
if col.dtype == "float32":
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == "integer":
# GH35076 Map pandas integer to optimal SQLAlchemy integer type
if col.dtype.name.lower() in ("int8", "uint8", "int16"):
return SmallInteger
elif col.dtype.name.lower() in ("uint16", "int32"):
return Integer
elif col.dtype.name.lower() == "uint64":
raise ValueError("Unsigned 64 bit integer datatype is not supported")
else:
return BigInteger
elif col_type == "boolean":
return Boolean
elif col_type == "date":
return Date
elif col_type == "time":
return Time
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (
TIMESTAMP,
Boolean,
Date,
DateTime,
Float,
Integer,
)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype("int64")
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
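# Illustrative sketch: a custom ``method`` callable for SQLTable.insert is
# invoked as ``method(pd_table, conn, keys, data_iter)`` and may return the
# number of rows written. This hypothetical variant simply delegates to the
# default per-row INSERT; a real one might issue COPY on PostgreSQL.
def _example_insert_method(pd_table, conn, keys, data_iter):
    return pd_table._execute_insert(conn, keys, data_iter)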
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
) -> int | None:
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
class BaseEngine:
def insert_records(
self,
table: SQLTable,
con,
frame,
name,
index=True,
schema=None,
chunksize=None,
method=None,
**engine_kwargs,
) -> int | None:
"""
Inserts data into already-prepared table
"""
raise AbstractMethodError(self)
class SQLAlchemyEngine(BaseEngine):
def __init__(self):
import_optional_dependency(
"sqlalchemy", extra="sqlalchemy is required for SQL support."
)
def insert_records(
self,
table: SQLTable,
con,
frame,
name,
index=True,
schema=None,
chunksize=None,
method=None,
**engine_kwargs,
) -> int | None:
from sqlalchemy import exc
try:
return table.insert(chunksize=chunksize, method=method)
except exc.SQLAlchemyError as err:
# GH34431
# https://stackoverflow.com/a/67358288/6067848
msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
)|inf can not be used with MySQL"""
err_text = str(err.orig)
if re.search(msg, err_text):
raise ValueError("inf cannot be used with MySQL") from err
else:
raise err
def get_engine(engine: str) -> BaseEngine:
"""return our implementation"""
if engine == "auto":
engine = get_option("io.sql.engine")
if engine == "auto":
# try engines in this order
engine_classes = [SQLAlchemyEngine]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'sqlalchemy'.\n"
"A suitable version of "
"sqlalchemy is required for sql I/O "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
elif engine == "sqlalchemy":
return SQLAlchemyEngine()
raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle database abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
"""
def __init__(self, engine, schema: str | None = None):
from sqlalchemy.schema import MetaData
self.connectable = engine
self.meta = MetaData(schema=schema)
@contextmanager
def run_transaction(self):
from sqlalchemy.engine import Engine
if isinstance(self.connectable, Engine):
with self.connectable.connect() as conn:
with conn.begin():
yield conn
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execution_options().execute(*args, **kwargs)
def read_table(
self,
table_name: str,
index_col: str | Sequence[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
schema: str | None = None,
chunksize: int | None = None,
):
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
@staticmethod
def _query_iterator(
result,
chunksize: int,
columns,
index_col=None,
coerce_float=True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
"""Return generator through chunked result set"""
has_read_data = False
while True:
data = result.fetchmany(chunksize)
if not data:
if not has_read_data:
yield _wrap_result(
[],
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
break
else:
has_read_data = True
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql: str,
index_col: str | None = None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
):
"""
Read SQL query into a DataFrame.
Parameters
----------
sql : str
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : bool, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, it uses %(name)s, so use params={'name': 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
            {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
.. versionadded:: 1.3.0
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = result.fetchall()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
read_sql = read_query
def prep_table(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
dtype: DtypeArg | None = None,
) -> SQLTable:
"""
Prepares table in the database for data insertion. Creates it if needed, etc.
"""
if dtype:
if not is_dict_like(dtype):
# error: Value expression in dictionary comprehension has incompatible
# type "Union[ExtensionDtype, str, dtype[Any], Type[object],
# Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
# Type[str], Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]]"; expected type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"
dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
else:
dtype = cast(dict, dtype)
from sqlalchemy.types import (
TypeEngine,
to_instance,
)
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
schema=schema,
dtype=dtype,
)
table.create()
return table
def check_case_sensitive(
self,
name,
schema,
):
"""
Checks table name for issues with case-sensitivity.
Method is called after data is inserted.
"""
if not name.isdigit() and not name.islower():
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
if _gt14():
from sqlalchemy import inspect
insp = inspect(conn)
table_names = insp.get_table_names(
schema=schema or self.meta.schema
)
else:
table_names = engine.table_names(
schema=schema or self.meta.schema, connection=conn
)
if name not in table_names:
msg = (
f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
)
warnings.warn(msg, UserWarning)
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
engine="auto",
**engine_kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if it does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
        method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
engine : {'auto', 'sqlalchemy'}, default 'auto'
SQL engine library to use. If 'auto', then the option
``io.sql.engine`` is used. The default ``io.sql.engine``
            behavior is 'sqlalchemy'.
.. versionadded:: 1.3.0
**engine_kwargs
Any additional kwargs are passed to the engine.
"""
sql_engine = get_engine(engine)
table = self.prep_table(
frame=frame,
name=name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
dtype=dtype,
)
total_inserted = sql_engine.insert_records(
table=table,
con=self.connectable,
frame=frame,
name=name,
index=index,
schema=schema,
chunksize=chunksize,
method=method,
**engine_kwargs,
)
self.check_case_sensitive(name=name, schema=schema)
return total_inserted
@property
def tables(self):
return self.meta.tables
def has_table(self, name: str, schema: str | None = None):
if _gt14():
from sqlalchemy import inspect
insp = inspect(self.connectable)
return insp.has_table(name, schema or self.meta.schema)
else:
return self.connectable.run_callable(
self.connectable.dialect.has_table, name, schema or self.meta.schema
)
def get_table(self, table_name: str, schema: str | None = None):
from sqlalchemy import (
Numeric,
Table,
)
schema = schema or self.meta.schema
tbl = Table(
table_name, self.meta, autoload_with=self.connectable, schema=schema
)
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name: str, schema: str | None = None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(bind=self.connectable, only=[table_name], schema=schema)
self.get_table(table_name, schema).drop(bind=self.connectable)
self.meta.clear()
def _create_sql_schema(
self,
frame: DataFrame,
table_name: str,
keys: list[str] | None = None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
table = SQLTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
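# Illustrative round trip through SQLDatabase (assumes SQLAlchemy 1.4.x is
# installed; 2.0 removed the legacy string-execute path that ``execute``
# relies on). Hypothetical helper added for this doc, not part of the module.
def _example_sqldatabase_roundtrip():
    from sqlalchemy import create_engine

    db = SQLDatabase(create_engine("sqlite:///:memory:"))
    db.to_sql(DataFrame({"a": [1, 2]}), "demo", index=False)
    assert db.read_query("SELECT a FROM demo")["a"].tolist() == [1, 2]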
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
"string": "TEXT",
"floating": "REAL",
"integer": "INTEGER",
"datetime": "TIMESTAMP",
"date": "DATE",
"time": "TIME",
"boolean": "INTEGER",
}
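# Illustrative sketch: the keys of _SQL_TYPES are the dtype names produced by
# lib.infer_dtype, which is how SQLiteTable._sql_type_name picks a column type.
def _example_sqlite_type_lookup():
    assert lib.infer_dtype(Series([1, 2, 3]), skipna=True) == "integer"
    assert _SQL_TYPES["integer"] == "INTEGER"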
def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError as err:
raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
return uname
def _get_valid_sqlite_name(name):
# See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
class SQLiteTable(SQLTable):
"""
    Patch the SQLTable for fallback support.
    Instead of a table variable, just use the CREATE TABLE statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super().__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows: int):
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
def _execute_insert(self, conn, keys, data_iter) -> int:
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
return conn.rowcount
def _execute_insert_multi(self, conn, keys, data_iter) -> int:
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
return conn.rowcount
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
if self.schema:
schema_name = self.schema + "."
else:
schema_name = ""
create_stmts = [
"CREATE TABLE "
+ schema_name
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
+ "ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=find_stack_level(),
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
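# Illustrative sketch of the generated fallback INSERT: "?" placeholders,
# one parenthesised group per row. Hypothetical helper for this doc only
# (SQLiteDatabase is defined just below and resolved at call time).
def _example_sqlite_insert_statement():
    import sqlite3

    db = SQLiteDatabase(sqlite3.connect(":memory:"))
    tbl = SQLiteTable("demo", db, frame=DataFrame({"a": [1]}), index=False)
    assert tbl.insert_statement(num_rows=2) == 'INSERT INTO "demo" ("a") VALUES (?),(?)'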
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con):
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor,
chunksize: int,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
"""Return generator through chunked result set"""
has_read_data = False
while True:
data = cursor.fetchmany(chunksize)
            if isinstance(data, tuple):
data = list(data)
if not data:
cursor.close()
if not has_read_data:
yield DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
break
else:
has_read_data = True
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql,
index_col=None,
coerce_float: bool = True,
params=None,
parse_dates=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
**kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
        fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
index : bool, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
"""
if dtype:
if not is_dict_like(dtype):
# error: Value expression in dictionary comprehension has incompatible
# type "Union[ExtensionDtype, str, dtype[Any], Type[object],
# Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
# Type[str], Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]]"; expected type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"
dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
else:
dtype = cast(dict, dtype)
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
return table.insert(chunksize, method)
def has_table(self, name: str, schema: str | None = None):
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name: str, schema: str | None = None):
return None # not supported in fallback mode
def drop_table(self, name: str, schema: str | None = None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(
self,
frame,
table_name: str,
keys=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
table = SQLiteTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
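# Illustrative end-to-end sketch of the SQLAlchemy-free path: write a frame
# through SQLiteDatabase.to_sql, then read it back with read_query.
# Hypothetical helper added for this doc.
def _example_sqlite_roundtrip():
    import sqlite3

    db = SQLiteDatabase(sqlite3.connect(":memory:"))
    db.to_sql(DataFrame({"a": [1, 2]}), "demo", index=False)
    assert db.read_query("SELECT a FROM demo")["a"].tolist() == [1, 2]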
def get_schema(
frame,
name: str,
keys=None,
con=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : str
name of SQL table
keys : string or sequence, default: None
columns to use a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
schema: str, default: None
Optional specifying the schema to be used in creating the table.
.. versionadded:: 1.2.0
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(
frame, name, keys=keys, dtype=dtype, schema=schema
)
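# Illustrative sketch: with con=None, get_schema renders DDL via the sqlite
# fallback without touching any database. Hypothetical helper for this doc.
def _example_get_schema():
    ddl = get_schema(DataFrame({"a": [1]}), "demo", con=None)
    assert ddl.startswith("CREATE TABLE")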
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
class SQLiteTable(SQLTable):
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super().__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows: int):
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
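    # Example of the generated statement (illustrative; assumes a table named
    # "t" with columns a and b, no index columns, and num_rows=2):
    #
    #   INSERT INTO "t" ("a","b") VALUES (?,?),(?,?)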
def _execute_insert(self, conn, keys, data_iter) -> int:
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
return conn.rowcount
def _execute_insert_multi(self, conn, keys, data_iter) -> int:
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
return conn.rowcount
def _create_table_setup(self):
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
if self.schema:
schema_name = self.schema + "."
else:
schema_name = ""
create_stmts = [
"CREATE TABLE "
+ schema_name
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
+ "ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=find_stack_level(),
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
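# Mapping sketch (explanatory note, not part of the original source): with the
# _SQL_TYPES table above, a float64 column infers as "floating" and is emitted
# as REAL, a datetime64 column becomes TIMESTAMP, and anything unrecognised
# falls back to TEXT.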
class SQLiteDatabase(PandasSQL):
def __init__(self, con):
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor,
chunksize: int,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
has_read_data = False
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
if not has_read_data:
yield DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
break
else:
has_read_data = True
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql,
index_col=None,
coerce_float: bool = True,
params=None,
parse_dates=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
**kwargs,
) -> int | None:
if dtype:
if not is_dict_like(dtype):
# error: Value expression in dictionary comprehension has incompatible
# type "Union[ExtensionDtype, str, dtype[Any], Type[object],
# Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
# Type[str], Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]]"; expected type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"
dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
else:
dtype = cast(dict, dtype)
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
return table.insert(chunksize, method)
def has_table(self, name: str, schema: str | None = None):
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name: str, schema: str | None = None):
return None # not supported in fallback mode
def drop_table(self, name: str, schema: str | None = None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(
self,
frame,
table_name: str,
keys=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
table = SQLiteTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
def get_schema(
frame,
name: str,
keys=None,
con=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(
frame, name, keys=keys, dtype=dtype, schema=schema
)
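# Illustrative use (sketch, not part of the original module): with "con" an
# open sqlite3 connection and "df" a single integer column named "a",
# get_schema(df, "t", con=con) returns a DDL string along the lines of
# 'CREATE TABLE "t" (\n"a" INTEGER\n)'.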
| true | true |
f7fdc54e14c5e6092dc79862a68525f099901e73 | 2,402 | py | Python | greenberry/cmds/start_funcs.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | greenberry/cmds/start_funcs.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | greenberry/cmds/start_funcs.py | greenberrycoin/gbch-blockchain | d99843d720c6e7bd7baaf8bb4639a46dbb56caed | [
"Apache-2.0"
] | null | null | null | import asyncio
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional
from greenberry.daemon.client import DaemonProxy, connect_to_daemon_and_validate
from greenberry.util.service_groups import services_for_groups
def launch_start_daemon(root_path: Path) -> subprocess.Popen:
os.environ["GREENBERRY_ROOT"] = str(root_path)
# TODO: use startupinfo=subprocess.DETACHED_PROCESS on windows
greenberry = sys.argv[0]
process = subprocess.Popen(f"{greenberry} run_daemon".split(), stdout=subprocess.PIPE)
return process
async def create_start_daemon_connection(root_path: Path) -> Optional[DaemonProxy]:
connection = await connect_to_daemon_and_validate(root_path)
if connection is None:
print("Starting daemon")
# launch a daemon
process = launch_start_daemon(root_path)
# give the daemon a chance to start up
if process.stdout:
process.stdout.readline()
await asyncio.sleep(1)
# it prints "daemon: listening"
connection = await connect_to_daemon_and_validate(root_path)
if connection:
return connection
return None
async def async_start(root_path: Path, group: str, restart: bool) -> None:
daemon = await create_start_daemon_connection(root_path)
if daemon is None:
print("Failed to create the greenberry daemon")
return None
for service in services_for_groups(group):
if await daemon.is_running(service_name=service):
print(f"{service}: ", end="", flush=True)
if restart:
if not await daemon.is_running(service_name=service):
print("not running")
elif await daemon.stop_service(service_name=service):
print("stopped")
else:
print("stop failed")
else:
print("Already running, use `-r` to restart")
continue
print(f"{service}: ", end="", flush=True)
msg = await daemon.start_service(service_name=service)
success = msg and msg["data"]["success"]
if success is True:
print("started")
else:
error = "no response"
if msg:
error = msg["data"]["error"]
print(f"{service} failed to start. Error: {error}")
await daemon.close()
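# Hypothetical driver (sketch, not part of the original module): a caller
# would typically run the coroutine with a service-group name understood by
# services_for_groups, e.g.
#
#   asyncio.run(async_start(root_path, "farmer", restart=False))
#
# where "farmer" is an assumed group name.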
| 34.811594 | 90 | 0.642798 | import asyncio
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional
from greenberry.daemon.client import DaemonProxy, connect_to_daemon_and_validate
from greenberry.util.service_groups import services_for_groups
def launch_start_daemon(root_path: Path) -> subprocess.Popen:
os.environ["GREENBERRY_ROOT"] = str(root_path)
greenberry = sys.argv[0]
process = subprocess.Popen(f"{greenberry} run_daemon".split(), stdout=subprocess.PIPE)
return process
async def create_start_daemon_connection(root_path: Path) -> Optional[DaemonProxy]:
connection = await connect_to_daemon_and_validate(root_path)
if connection is None:
print("Starting daemon")
process = launch_start_daemon(root_path)
if process.stdout:
process.stdout.readline()
await asyncio.sleep(1)
connection = await connect_to_daemon_and_validate(root_path)
if connection:
return connection
return None
async def async_start(root_path: Path, group: str, restart: bool) -> None:
daemon = await create_start_daemon_connection(root_path)
if daemon is None:
print("Failed to create the greenberry daemon")
return None
for service in services_for_groups(group):
if await daemon.is_running(service_name=service):
print(f"{service}: ", end="", flush=True)
if restart:
if not await daemon.is_running(service_name=service):
print("not running")
elif await daemon.stop_service(service_name=service):
print("stopped")
else:
print("stop failed")
else:
print("Already running, use `-r` to restart")
continue
print(f"{service}: ", end="", flush=True)
msg = await daemon.start_service(service_name=service)
success = msg and msg["data"]["success"]
if success is True:
print("started")
else:
error = "no response"
if msg:
error = msg["data"]["error"]
print(f"{service} failed to start. Error: {error}")
await daemon.close()
| true | true |
f7fdc552f7ecb99de31eeaf956540da0b2278715 | 2,076 | py | Python | InstagramAPI/src/http/Response/Objects/User.py | partizan007/Instagram-API | 3435dc6855e1cccf2c85a41839d15ca930563b21 | [
"MIT"
] | 126 | 2016-05-18T19:20:32.000Z | 2022-02-12T10:30:50.000Z | InstagramAPI/src/http/Response/Objects/User.py | partizan007/Instagram-API | 3435dc6855e1cccf2c85a41839d15ca930563b21 | [
"MIT"
] | 41 | 2016-08-07T17:32:37.000Z | 2022-01-13T00:25:31.000Z | InstagramAPI/src/http/Response/Objects/User.py | partizan007/Instagram-API | 3435dc6855e1cccf2c85a41839d15ca930563b21 | [
"MIT"
] | 61 | 2016-07-07T14:18:38.000Z | 2021-03-28T12:48:26.000Z | from InstagramAPI.src.http.Response.Objects.FriendshipStatus import FriendshipStatus
class User(object):
def __init__(self, userData):
self.username = None
self.has_anonymous_profile_picture = False
self.is_favorite = False
self.profile_pic_url = None
self.full_name = None
self.pk = None
self.is_verified = False
self.is_private = False
self.coeff_weight = 0
self.friendship_status = None
self.username = userData['username']
self.profile_pic_url = userData['profile_pic_url']
self.full_name = userData['full_name']
self.pk = userData['pk']
if 'is_verified' in userData and userData['is_verified']:
self.is_verified = userData['is_verified']
self.is_private = userData['is_private']
if 'has_anonymous_profile_picture' in userData and userData['has_anonymous_profile_picture']:
self.has_anonymous_profile_picture = userData['has_anonymous_profile_picture']
if 'is_favorite' in userData and userData['is_favorite']:
self.is_favorite = userData['is_favorite']
if 'coeff_weight' in userData and userData['coeff_weight']:
self.coeff_weight = userData['coeff_weight']
if 'friendship_status' in userData and userData['friendship_status']:
self.friendship_status = FriendshipStatus(userData['friendship_status'])
def getUsername(self):
return self.username
def getProfilePicUrl(self):
return self.profile_pic_url
def getFullName(self):
return self.full_name
def getUsernameId(self):
return self.pk
def isVerified(self):
return self.is_verified
def isPrivate(self):
return self.is_private
def hasAnonymousProfilePicture(self):
return self.has_anonymous_profile_picture
def isFavorite(self):
return self.is_favorite
def getCoeffWeight(self):
return self.coeff_weight
def getFriendshipStatus(self):
return self.friendship_status
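# Illustrative construction (sketch, not part of the original class; the dict
# keys mirror the reads in __init__ above, the values are made up):
#
#   user = User({
#       "username": "jane",
#       "profile_pic_url": "https://example.com/pic.jpg",
#       "full_name": "Jane Doe",
#       "pk": 123,
#       "is_private": False,
#   })
#   user.getUsername()    # -> "jane"
#   user.isVerified()     # -> False (default when 'is_verified' is absent)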
| 33.483871 | 101 | 0.679672 | from InstagramAPI.src.http.Response.Objects.FriendshipStatus import FriendshipStatus
class User(object):
def __init__(self, userData):
self.username = None
self.has_anonymous_profile_picture = False
self.is_favorite = False
self.profile_pic_url = None
self.full_name = None
self.pk = None
self.is_verified = False
self.is_private = False
self.coeff_weight = 0
self.friendship_status = None
self.username = userData['username']
self.profile_pic_url = userData['profile_pic_url']
self.full_name = userData['full_name']
self.pk = userData['pk']
if 'is_verified' in userData and userData['is_verified']:
self.is_verified = userData['is_verified']
self.is_private = userData['is_private']
if 'has_anonymous_profile_picture' in userData and userData['has_anonymous_profile_picture']:
self.has_anonymous_profile_picture = userData['has_anonymous_profile_picture']
if 'is_favorite' in userData and userData['is_favorite']:
self.is_favorite = userData['is_favorite']
if 'coeff_weight' in userData and userData['coeff_weight']:
self.coeff_weight = userData['coeff_weight']
if 'friendship_status' in userData and userData['friendship_status']:
self.friendship_status = FriendshipStatus(userData['friendship_status'])
def getUsername(self):
return self.username
def getProfilePicUrl(self):
return self.profile_pic_url
def getFullName(self):
return self.full_name
def getUsernameId(self):
return self.pk
def isVerified(self):
return self.is_verified
def isPrivate(self):
return self.is_private
def hasAnonymousProfilePicture(self):
return self.has_anonymous_profile_picture
def isFavorite(self):
return self.is_favorite
def getCoeffWeight(self):
return self.coeff_weight
def getFriendshipStatus(self):
return self.friendship_status
| true | true |
f7fdc5668ed5d1e051febae0930901b26d1090bc | 786 | py | Python | pyexcel/plugins/renderers/__init__.py | hiaselhans/pyexcel | 4c96909eaa7ec322f28207f43e41f1fff07d8123 | [
"BSD-3-Clause"
] | null | null | null | pyexcel/plugins/renderers/__init__.py | hiaselhans/pyexcel | 4c96909eaa7ec322f28207f43e41f1fff07d8123 | [
"BSD-3-Clause"
] | null | null | null | pyexcel/plugins/renderers/__init__.py | hiaselhans/pyexcel | 4c96909eaa7ec322f28207f43e41f1fff07d8123 | [
"BSD-3-Clause"
] | null | null | null | """
pyexcel.plugins.renderers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A list of built-in renderers
:copyright: (c) 2015-2022 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel.plugins import PyexcelPluginChain
from pyexcel_io.plugins import WRITERS
from pyexcel_io.constants import DB_SQL, DB_DJANGO
PyexcelPluginChain(__name__).add_a_renderer(
relative_plugin_class_path="sqlalchemy.SQLAlchemyRenderer",
file_types=[DB_SQL],
).add_a_renderer(
relative_plugin_class_path="django.DjangoRenderer", file_types=[DB_DJANGO]
).add_a_renderer(
relative_plugin_class_path="excel.ExcelRenderer",
file_types=WRITERS.get_all_formats(),
).add_a_renderer(
relative_plugin_class_path="_texttable.TextTableRenderer",
file_types=["texttable"],
)
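# Illustrative effect (explanatory sketch, not part of the original module):
# importing this package registers the renderers above with pyexcel's plugin
# chain, so that a call such as
#
#   pyexcel.save_as(array=[[1, 2]], dest_file_name="out.xlsx")
#
# can locate the ExcelRenderer for the "xlsx" file type.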
| 29.111111 | 78 | 0.745547 | from pyexcel.plugins import PyexcelPluginChain
from pyexcel_io.plugins import WRITERS
from pyexcel_io.constants import DB_SQL, DB_DJANGO
PyexcelPluginChain(__name__).add_a_renderer(
relative_plugin_class_path="sqlalchemy.SQLAlchemyRenderer",
file_types=[DB_SQL],
).add_a_renderer(
relative_plugin_class_path="django.DjangoRenderer", file_types=[DB_DJANGO]
).add_a_renderer(
relative_plugin_class_path="excel.ExcelRenderer",
file_types=WRITERS.get_all_formats(),
).add_a_renderer(
relative_plugin_class_path="_texttable.TextTableRenderer",
file_types=["texttable"],
)
| true | true |
f7fdc632b63b56e6ac7db5f3721b8ac23214eb58 | 14,597 | py | Python | tests/transformations/transformations_utils.py | cmutel/Ocelot | 20e9639570c43f84ae255750a6c402ebabe00981 | [
"BSD-3-Clause"
] | 21 | 2016-06-01T14:10:07.000Z | 2022-02-28T01:56:31.000Z | tests/transformations/transformations_utils.py | cmutel/Ocelot | 20e9639570c43f84ae255750a6c402ebabe00981 | [
"BSD-3-Clause"
] | 152 | 2016-05-16T21:33:22.000Z | 2019-06-24T12:57:14.000Z | tests/transformations/transformations_utils.py | cmutel/Ocelot | 20e9639570c43f84ae255750a6c402ebabe00981 | [
"BSD-3-Clause"
] | 12 | 2016-09-05T15:35:59.000Z | 2021-07-03T19:28:47.000Z | # -*- coding: utf-8 -*-
from ocelot.errors import InvalidMultioutputDataset, ZeroProduction
from ocelot.transformations.uncertainty import remove_exchange_uncertainty
from ocelot.transformations.utils import *
from ocelot.utils import get_function_meta
import pytest
def test_allocatable_production():
exchanges = [
{'type': 'reference product'},
{'type': 'not reference product'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
{'type': 'byproduct', 'byproduct classification': 'cat'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
]
dataset = {'exchanges': exchanges}
for x, y in zip(allocatable_production(dataset), exchanges[0:5:2]):
assert x == y
assert len(list(allocatable_production(dataset))) == 3
def test_allocatable_production_include_all_reference_products():
given = {"exchanges": [
{'type': 'reference product', 'byproduct classification': 'recyclable'},
{'type': 'reference product', 'byproduct classification': 'allocatable product'},
{'type': 'reference product', 'byproduct classification': 'waste'},
{'type': 'reference product', 'byproduct classification': 'foo'},
]}
assert len(list(allocatable_production(given))) == 4
def test_nonproduction_exchanges():
exchanges = [
{'type': 'reference product'},
{'type': 'not reference product'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
{'type': 'byproduct', 'byproduct classification': 'cat'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
]
dataset = {'exchanges': exchanges}
for x, y in zip(nonproduction_exchanges(dataset), exchanges[1:4:2]):
assert x == y
assert len(list(nonproduction_exchanges(dataset))) == 2
def test_get_property_by_name():
given = {'properties': [{'name': 'foo', 'amount': 42}]}
assert get_property_by_name(given, "foo") == {'name': 'foo', 'amount': 42}
def test_get_property_by_name_not_present():
given = {'properties': [{'name': 'bar', 'amount': 42}]}
assert get_property_by_name({}, "foo") == {}
def test_get_numerical_property():
given = {'properties': [{'name': 'foo', 'amount': 42}]}
assert get_numerical_property(given, "foo") == 42
def test_get_numerical_property_no_properties():
assert get_numerical_property({}, "foo") is None
def test_get_numerical_property_not_correct_property():
given = {'properties': [{'name': 'foo', 'amount': 42}]}
assert get_numerical_property(given, "bar") is None
def test_single_reference_product():
given = {'exchanges': [
{
'type': 'reference product',
'name': 'sandwich'
},
{
'type': 'not reference product',
'name': 'woooo!'
},
{
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'name': 'should be skipped'
},
]}
expected = {'type': 'reference product', 'name': 'sandwich'}
assert get_single_reference_product(given) == expected
def test_single_reference_product_multiple():
given = {
'filepath': 'foo',
'exchanges': [
{
'type': 'reference product',
'name': 'sandwich'
},
{
'type': 'reference product',
'name': 'hamburger'
},
]
}
with pytest.raises(InvalidMultioutputDataset):
get_single_reference_product(given)
def test_single_reference_product_none():
with pytest.raises(ValueError):
get_single_reference_product({
'filepath': 'foo',
'exchanges': [{'type': 'something'}]
})
def test_normalize_reference_production_logging():
given = {
'name': 'foo',
'exchanges': [
{
'type': 'reference product',
'name': 'bar',
'amount': 0.5
},
{
'type': 'something else',
'amount': 10
}
]
}
assert normalize_reference_production_amount(given)
def test_normalize_reference_production_epsilon():
given = {
'name': 'foo',
'exchanges': [
{
'type': 'reference product',
'amount': 0.9
},
{
'type': 'something else',
'amount': 10
}
]
}
expected = {
'name': 'foo',
'exchanges': [
{
'type': 'reference product',
'amount': 0.9
},
{
'type': 'something else',
'amount': 10
}
]
}
result = normalize_reference_production_amount(given, epsilon=0.5, log=False)[0]
assert result == expected
def test_normalize_reference_production_amount():
given = {'exchanges': [
{
'type': 'reference product',
'amount': 0.5
},
{
'type': 'something else',
'amount': 10
}
]}
expected = {'exchanges': [
{
'type': 'reference product',
'amount': 1
},
{
'type': 'something else',
'amount': 20
}
]}
assert normalize_reference_production_amount(given, log=False)[0] == expected
def test_normalize_reference_production_amount_zero_amount():
given = {
'filepath': 'foo',
'exchanges': [{
'type': 'reference product',
'amount': 0
}]
}
with pytest.raises(ZeroProduction):
normalize_reference_production_amount(given, log=False)
def test_activity_grouper():
given = {
'name': 'foo',
'exchanges': []
}
assert activity_grouper(given) == ('foo', ())
given = {
'name': 'bar',
'exchanges': [{
'type': 'reference product',
'name': 'b'
}, {
'type': 'reference product',
'name': 'a'
}]
}
assert activity_grouper(given) == ('bar', ('a', 'b'))
def test_label_reference_product():
valid = [{
'type': 'transforming activity',
'exchanges': [{
'type': 'reference product',
'name': 'foo'
}]
}]
expected = [{
'type': 'transforming activity',
'reference product': 'foo',
'exchanges': [{
'type': 'reference product',
'name': 'foo'
}]
}]
assert label_reference_product(valid) == expected
def test_label_reference_product_no_exchanges():
invalid = [{
'filepath': '',
'type': 'transforming activity',
'exchanges': [{'type': 'nope'}]
}]
with pytest.raises(ValueError):
label_reference_product(invalid)
def test_label_reference_product_multiple_rp():
invalid = [{
'filepath': '',
'type': 'transforming activity',
'exchanges': [
{'type': 'reference product'},
{'type': 'reference product'},
]
}]
with pytest.raises(InvalidMultioutputDataset):
label_reference_product(invalid)
def test_remove_uncertainty():
expected = {'amount': 42}
assert remove_exchange_uncertainty({'amount': 42}) == expected
def test_nonreference_product():
given = {
'production volume': 'delete me',
'formula': 'delete me too'
}
expected = {
'type': 'dropped product',
'amount': 0,
}
assert nonreference_product(given) == expected
@pytest.fixture(scope="function")
def no_normalization(monkeypatch):
monkeypatch.setattr(
'ocelot.transformations.utils.normalize_reference_production_amount',
lambda x, log=True, epsilon=1e-14: x
)
def test_choose_reference_product_exchange(no_normalization):
given = {'exchanges': [{
'type': 'reference product',
'amount': 42
}, {
'type': 'reference product',
'amount': 20,
'formula': 'delete me',
'production volume': 'delete me too',
}, {
'type': 'other thing',
'amount': 100
}]}
expected = {'exchanges': [{
'amount': 42,
'type': 'reference product',
}, {
'type': 'dropped product',
'amount': 0,
}, {
'type': 'other thing',
'amount': 10
}]}
answer = choose_reference_product_exchange(given, given['exchanges'][0], 0.1)
assert answer == expected
for one, two in zip(given['exchanges'], answer['exchanges']):
# Check for copy
assert one is not two
def test_choose_reference_product_exchange_byproducts(no_normalization):
given = {'exchanges': [{
'type': 'byproduct',
'byproduct classification': "allocatable product",
'amount': 42
}, {
'type': 'reference product',
'amount': 20,
'production volume': 'delete me',
'formula': 'delete me too',
}, {
'type': 'other thing',
'amount': 100
}]}
expected = {'exchanges': [{
'amount': 42,
'type': 'reference product',
}, {
'type': 'dropped product',
'amount': 0,
}, {
'type': 'other thing',
'amount': 10
}]}
answer = choose_reference_product_exchange(given, given['exchanges'][0], 0.1)
assert answer == expected
for one, two in zip(given['exchanges'], answer['exchanges']):
# Check for copy
assert one is not two
def test_choose_reference_product_exchange_zero_production(no_normalization):
given = {
'filepath': 'foo',
'exchanges': [{
'type': 'reference product',
'amount': 0
}]
}
with pytest.raises(ZeroProduction):
choose_reference_product_exchange(given, given['exchanges'][0])
@pytest.fixture(scope="function")
def parameterized_ds():
return {
'exchanges': [{
'amount': 3.1415926535,
'variable': 'pie',
'production volume': { # Nonsensical but should work
'variable': 'number_blueberries',
'amount': 42
},
'properties': [{
'variable': 'blueberry_volume',
'amount': 17
}]
}, {
'variable': 'circle',
'formula': 'pie * radius ** 2',
'properties': [{
'variable': 'radius',
'formula': 'blueberry_size * number_blueberries'
}]
}],
'parameters': [{
'variable': 'blueberry_size',
'formula': 'blueberry_density * blueberry_volume'
}, {
'variable': 'blueberry_density',
'amount': 1
}]
}
def test_iterate_all_parameters(parameterized_ds):
generator = iterate_all_parameters(parameterized_ds)
assert next(generator) == parameterized_ds['exchanges'][0]
assert next(generator) == parameterized_ds['exchanges'][0]['production volume']
assert next(generator) == parameterized_ds['exchanges'][0]['properties'][0]
assert next(generator) == parameterized_ds['exchanges'][1]
assert next(generator) == parameterized_ds['exchanges'][1]['properties'][0]
assert next(generator) == parameterized_ds['parameters'][0]
assert next(generator) == parameterized_ds['parameters'][1]
@pytest.fixture(scope="function")
def uncertain_ds():
return {
'exchanges': [{
'amount': 3.1415926535,
'uncertainty': '',
'production volume': { # Nonsensical but should work
'uncertainty': '',
'amount': 42
},
'properties': [{
'uncertainty': '',
'amount': 17
}]
}, {
'uncertainty': '',
'properties': [{
'uncertainty': '',
}]
}],
'parameters': [{
'uncertainty': '',
}, {
'uncertainty': '',
'amount': 1
}]
}
def test_iterate_all_uncertainties(uncertain_ds):
generator = iterate_all_uncertainties(uncertain_ds)
assert next(generator) == uncertain_ds['exchanges'][0]
assert next(generator) == uncertain_ds['exchanges'][0]['production volume']
assert next(generator) == uncertain_ds['exchanges'][0]['properties'][0]
assert next(generator) == uncertain_ds['exchanges'][1]
assert next(generator) == uncertain_ds['exchanges'][1]['properties'][0]
assert next(generator) == uncertain_ds['parameters'][0]
assert next(generator) == uncertain_ds['parameters'][1]
def test_activity_hash():
given = {
'name': 'a',
'reference product': 'b',
'unit': 'c',
'location': 'd',
'start date': 'e',
'end date': 'f',
'foo': 'bar',
}
assert activity_hash({})
assert activity_hash(given)
def test_get_biggest_pv_to_exchange_ratio():
given = {'exchanges': [{
'amount': 2,
'production volume': {'amount': 10},
'type': 'reference product',
}, {
'amount': 5,
'production volume': {'amount': 20},
'type': 'reference product',
}, {
'amount': 1,
'production volume': {'amount': 20},
'type': 'not reference product',
}]}
assert get_biggest_pv_to_exchange_ratio(given) == 5
def test_get_biggest_pv_to_exchange_ratio_neg_numbers():
given = {'exchanges': [{
'amount': -2,
'production volume': {'amount': 10},
'type': 'reference product',
}, {
'amount': -5,
'production volume': {'amount': 20},
'type': 'reference product',
}]}
assert get_biggest_pv_to_exchange_ratio(given) == -5
def test_get_biggest_pv_to_exchange_ratio_no_rps():
error = {
'exchanges': [],
'name': ''
}
with pytest.raises(ZeroProduction):
get_biggest_pv_to_exchange_ratio(error)
@pytest.fixture
def func():
@single_input
def f(dataset):
"""A docstring"""
return [dataset * 2]
f.__table__ = "Something about a table"
return f
def test_single_input_metadata(func):
metadata = get_function_meta(func)
assert metadata['name'] == 'f'
assert metadata['description'] == "A docstring"
assert metadata['table'] == "Something about a table"
def test_single_input_correct_unrolling(func):
assert func([1, 2, 3]) == [2, 4, 6]
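# Reading of the last two tests (explanatory note, not part of the original
# suite): @single_input evidently lifts a one-dataset function returning a
# list into a function over a list of datasets, concatenating the per-dataset
# results, which is why func([1, 2, 3]) == [2, 4, 6] above.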
| 30.221532 | 89 | 0.555525 |
from ocelot.errors import InvalidMultioutputDataset, ZeroProduction
from ocelot.transformations.uncertainty import remove_exchange_uncertainty
from ocelot.transformations.utils import *
from ocelot.utils import get_function_meta
import pytest
def test_allocatable_production():
exchanges = [
{'type': 'reference product'},
{'type': 'not reference product'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
{'type': 'byproduct', 'byproduct classification': 'cat'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
]
dataset = {'exchanges': exchanges}
for x, y in zip(allocatable_production(dataset), exchanges[0:5:2]):
assert x == y
assert len(list(allocatable_production(dataset))) == 3
def test_allocatable_production_include_all_reference_products():
given = {"exchanges": [
{'type': 'reference product', 'byproduct classification': 'recyclable'},
{'type': 'reference product', 'byproduct classification': 'allocatable product'},
{'type': 'reference product', 'byproduct classification': 'waste'},
{'type': 'reference product', 'byproduct classification': 'foo'},
]}
assert len(list(allocatable_production(given))) == 4
def test_nonproduction_exchanges():
exchanges = [
{'type': 'reference product'},
{'type': 'not reference product'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
{'type': 'byproduct', 'byproduct classification': 'cat'},
{'type': 'byproduct', 'byproduct classification': 'allocatable product'},
]
dataset = {'exchanges': exchanges}
for x, y in zip(nonproduction_exchanges(dataset), exchanges[1:4:2]):
assert x == y
assert len(list(nonproduction_exchanges(dataset))) == 2
def test_get_property_by_name():
given = {'properties': [{'name': 'foo', 'amount': 42}]}
assert get_property_by_name(given, "foo") == {'name': 'foo', 'amount': 42}
def test_get_property_by_name_not_present():
given = {'properties': [{'name': 'bar', 'amount': 42}]}
assert get_property_by_name({}, "foo") == {}
def test_get_numerical_property():
given = {'properties': [{'name': 'foo', 'amount': 42}]}
assert get_numerical_property(given, "foo") == 42
def test_get_numerical_property_no_properties():
assert get_numerical_property({}, "foo") is None
def test_get_numerical_property_not_correct_property():
given = {'properties': [{'name': 'foo', 'amount': 42}]}
assert get_numerical_property(given, "bar") is None
def test_single_reference_product():
given = {'exchanges': [
{
'type': 'reference product',
'name': 'sandwich'
},
{
'type': 'not reference product',
'name': 'woooo!'
},
{
'type': 'byproduct',
'byproduct classification': 'allocatable product',
'name': 'should be skipped'
},
]}
expected = {'type': 'reference product', 'name': 'sandwich'}
assert get_single_reference_product(given) == expected
def test_single_reference_product_multiple():
given = {
'filepath': 'foo',
'exchanges': [
{
'type': 'reference product',
'name': 'sandwich'
},
{
'type': 'reference product',
'name': 'hamburger'
},
]
}
with pytest.raises(InvalidMultioutputDataset):
get_single_reference_product(given)
def test_single_reference_product_none():
with pytest.raises(ValueError):
get_single_reference_product({
'filepath': 'foo',
'exchanges': [{'type': 'something'}]
})
def test_normalize_reference_production_logging():
given = {
'name': 'foo',
'exchanges': [
{
'type': 'reference product',
'name': 'bar',
'amount': 0.5
},
{
'type': 'something else',
'amount': 10
}
]
}
assert normalize_reference_production_amount(given)
def test_normalize_reference_production_epsilon():
given = {
'name': 'foo',
'exchanges': [
{
'type': 'reference product',
'amount': 0.9
},
{
'type': 'something else',
'amount': 10
}
]
}
expected = {
'name': 'foo',
'exchanges': [
{
'type': 'reference product',
'amount': 0.9
},
{
'type': 'something else',
'amount': 10
}
]
}
result = normalize_reference_production_amount(given, epsilon=0.5, log=False)[0]
assert result == expected
def test_normalize_reference_production_amount():
given = {'exchanges': [
{
'type': 'reference product',
'amount': 0.5
},
{
'type': 'something else',
'amount': 10
}
]}
expected = {'exchanges': [
{
'type': 'reference product',
'amount': 1
},
{
'type': 'something else',
'amount': 20
}
]}
assert normalize_reference_production_amount(given, log=False)[0] == expected
def test_normalize_reference_production_amount_zero_amount():
given = {
'filepath': 'foo',
'exchanges': [{
'type': 'reference product',
'amount': 0
}]
}
with pytest.raises(ZeroProduction):
normalize_reference_production_amount(given, log=False)
def test_activity_grouper():
given = {
'name': 'foo',
'exchanges': []
}
assert activity_grouper(given) == ('foo', ())
given = {
'name': 'bar',
'exchanges': [{
'type': 'reference product',
'name': 'b'
}, {
'type': 'reference product',
'name': 'a'
}]
}
assert activity_grouper(given) == ('bar', ('a', 'b'))
def test_label_reference_product():
valid = [{
'type': 'transforming activity',
'exchanges': [{
'type': 'reference product',
'name': 'foo'
}]
}]
expected = [{
'type': 'transforming activity',
'reference product': 'foo',
'exchanges': [{
'type': 'reference product',
'name': 'foo'
}]
}]
assert label_reference_product(valid) == expected
def test_label_reference_product_no_exchanges():
invalid = [{
'filepath': '',
'type': 'transforming activity',
'exchanges': [{'type': 'nope'}]
}]
with pytest.raises(ValueError):
label_reference_product(invalid)
def test_label_reference_product_multiple_rp():
invalid = [{
'filepath': '',
'type': 'transforming activity',
'exchanges': [
{'type': 'reference product'},
{'type': 'reference product'},
]
}]
with pytest.raises(InvalidMultioutputDataset):
label_reference_product(invalid)
def test_remove_uncertainty():
expected = {'amount': 42}
assert remove_exchange_uncertainty({'amount': 42}) == expected
def test_nonreference_product():
given = {
'production volume': 'delete me',
'formula': 'delete me too'
}
expected = {
'type': 'dropped product',
'amount': 0,
}
assert nonreference_product(given) == expected
@pytest.fixture(scope="function")
def no_normalization(monkeypatch):
monkeypatch.setattr(
'ocelot.transformations.utils.normalize_reference_production_amount',
lambda x, log=True, epsilon=1e-14: x
)
def test_choose_reference_product_exchange(no_normalization):
given = {'exchanges': [{
'type': 'reference product',
'amount': 42
}, {
'type': 'reference product',
'amount': 20,
'formula': 'delete me',
'production volume': 'delete me too',
}, {
'type': 'other thing',
'amount': 100
}]}
expected = {'exchanges': [{
'amount': 42,
'type': 'reference product',
}, {
'type': 'dropped product',
'amount': 0,
}, {
'type': 'other thing',
'amount': 10
}]}
answer = choose_reference_product_exchange(given, given['exchanges'][0], 0.1)
assert answer == expected
for one, two in zip(given['exchanges'], answer['exchanges']):
assert one is not two
def test_choose_reference_product_exchange_byproducts(no_normalization):
given = {'exchanges': [{
'type': 'byproduct',
'byproduct classification': "allocatable product",
'amount': 42
}, {
'type': 'reference product',
'amount': 20,
'production volume': 'delete me',
'formula': 'delete me too',
}, {
'type': 'other thing',
'amount': 100
}]}
expected = {'exchanges': [{
'amount': 42,
'type': 'reference product',
}, {
'type': 'dropped product',
'amount': 0,
}, {
'type': 'other thing',
'amount': 10
}]}
answer = choose_reference_product_exchange(given, given['exchanges'][0], 0.1)
assert answer == expected
for one, two in zip(given['exchanges'], answer['exchanges']):
assert one is not two
def test_choose_reference_product_exchange_zero_production(no_normalization):
given = {
'filepath': 'foo',
'exchanges': [{
'type': 'reference product',
'amount': 0
}]
}
with pytest.raises(ZeroProduction):
choose_reference_product_exchange(given, given['exchanges'][0])
@pytest.fixture(scope="function")
def parameterized_ds():
return {
'exchanges': [{
'amount': 3.1415926535,
'variable': 'pie',
'production volume': {
'variable': 'number_blueberries',
'amount': 42
},
'properties': [{
'variable': 'blueberry_volume',
'amount': 17
}]
}, {
'variable': 'circle',
'formula': 'pie * radius ** 2',
'properties': [{
'variable': 'radius',
'formula': 'blueberry_size * number_blueberries'
}]
}],
'parameters': [{
'variable': 'blueberry_size',
'formula': 'blueberry_density * blueberry_volume'
}, {
'variable': 'blueberry_density',
'amount': 1
}]
}
def test_iterate_all_parameters(parameterized_ds):
generator = iterate_all_parameters(parameterized_ds)
assert next(generator) == parameterized_ds['exchanges'][0]
assert next(generator) == parameterized_ds['exchanges'][0]['production volume']
assert next(generator) == parameterized_ds['exchanges'][0]['properties'][0]
assert next(generator) == parameterized_ds['exchanges'][1]
assert next(generator) == parameterized_ds['exchanges'][1]['properties'][0]
assert next(generator) == parameterized_ds['parameters'][0]
assert next(generator) == parameterized_ds['parameters'][1]
@pytest.fixture(scope="function")
def uncertain_ds():
return {
'exchanges': [{
'amount': 3.1415926535,
'uncertainty': '',
'production volume': {
'uncertainty': '',
'amount': 42
},
'properties': [{
'uncertainty': '',
'amount': 17
}]
}, {
'uncertainty': '',
'properties': [{
'uncertainty': '',
}]
}],
'parameters': [{
'uncertainty': '',
}, {
'uncertainty': '',
'amount': 1
}]
}
def test_iterate_all_uncertainties(uncertain_ds):
generator = iterate_all_uncertainties(uncertain_ds)
assert next(generator) == uncertain_ds['exchanges'][0]
assert next(generator) == uncertain_ds['exchanges'][0]['production volume']
assert next(generator) == uncertain_ds['exchanges'][0]['properties'][0]
assert next(generator) == uncertain_ds['exchanges'][1]
assert next(generator) == uncertain_ds['exchanges'][1]['properties'][0]
assert next(generator) == uncertain_ds['parameters'][0]
assert next(generator) == uncertain_ds['parameters'][1]
def test_activity_hash():
given = {
'name': 'a',
'reference product': 'b',
'unit': 'c',
'location': 'd',
'start date': 'e',
'end date': 'f',
'foo': 'bar',
}
assert activity_hash({})
assert activity_hash(given)
def test_get_biggest_pv_to_exchange_ratio():
given = {'exchanges': [{
'amount': 2,
'production volume': {'amount': 10},
'type': 'reference product',
}, {
'amount': 5,
'production volume': {'amount': 20},
'type': 'reference product',
}, {
'amount': 1,
'production volume': {'amount': 20},
'type': 'not reference product',
}]}
assert get_biggest_pv_to_exchange_ratio(given) == 5
def test_get_biggest_pv_to_exchange_ratio_neg_numbers():
given = {'exchanges': [{
'amount': -2,
'production volume': {'amount': 10},
'type': 'reference product',
}, {
'amount': -5,
'production volume': {'amount': 20},
'type': 'reference product',
}]}
assert get_biggest_pv_to_exchange_ratio(given) == -5
def test_get_biggest_pv_to_exchange_ratio_no_rps():
error = {
'exchanges': [],
'name': ''
}
with pytest.raises(ZeroProduction):
get_biggest_pv_to_exchange_ratio(error)
@pytest.fixture
def func():
@single_input
def f(dataset):
return [dataset * 2]
f.__table__ = "Something about a table"
return f
def test_single_input_metadata(func):
metadata = get_function_meta(func)
assert metadata['name'] == 'f'
assert metadata['description'] == "A docstring"
assert metadata['table'] == "Something about a table"
def test_single_input_correct_unrolling(func):
assert func([1, 2, 3]) == [2, 4, 6]
| true | true |
f7fdc6d798a802b061a34ada5a01b6aebd4cb6cb | 3,601 | py | Python | sdk/keyvault/azure-keyvault-secrets/samples/backup_restore_operations_async.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/keyvault/azure-keyvault-secrets/samples/backup_restore_operations_async.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | sdk/keyvault/azure-keyvault-secrets/samples/backup_restore_operations_async.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.keyvault.secrets.aio import SecretClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. Microsoft Azure Key Vault PyPI package -
# https://pypi.python.org/pypi/azure-keyvault-secrets/
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic backup and restore operations on a vault(secret) resource for Azure Key Vault
#
# 1. Create a secret (set_secret)
#
# 2. Backup a secret (backup_secret)
#
# 3. Delete a secret (delete_secret)
#
# 4. Restore a secret (restore_secret_backup)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a secret client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = SecretClient(vault_url=VAULT_URL, credential=credential)
try:
# Let's create a secret holding storage account credentials.
# if the secret already exists in the Key Vault, then a new version of the secret is created.
print("\n.. Create Secret")
secret = await client.set_secret("backupRestoreSecretName", "backupRestoreSecretValue")
print("Secret with name '{0}' created with value '{1}'".format(secret.name, secret.value))
        # Backups are good to have in case secrets get deleted accidentally.
# For long term storage, it is ideal to write the backup to a file.
print("\n.. Create a backup for an existing Secret")
secret_backup = await client.backup_secret(secret.name)
print("Backup created for secret with name '{0}'.".format(secret.name))
# The storage account secret is no longer in use, so you delete it.
print("\n.. Deleting secret...")
await client.delete_secret(secret.name)
print("Deleted Secret with name '{0}'".format(secret.name))
# In future, if the secret is required again, we can use the backup value to restore it in the Key Vault.
print("\n.. Restore the secret using the backed up secret bytes")
secret = await client.restore_secret_backup(secret_backup)
print("Restored Secret with name '{0}'".format(secret.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| 46.166667 | 125 | 0.631491 |
import asyncio
import os
from azure.keyvault.secrets.aio import SecretClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
n_sample():
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = SecretClient(vault_url=VAULT_URL, credential=credential)
try:
# if the secret already exists in the Key Vault, then a new version of the secret is created.
print("\n.. Create Secret")
secret = await client.set_secret("backupRestoreSecretName", "backupRestoreSecretValue")
print("Secret with name '{0}' created with value '{1}'".format(secret.name, secret.value))
        # Backups are good to have in case secrets get deleted accidentally.
# For long term storage, it is ideal to write the backup to a file.
print("\n.. Create a backup for an existing Secret")
secret_backup = await client.backup_secret(secret.name)
print("Backup created for secret with name '{0}'.".format(secret.name))
# The storage account secret is no longer in use, so you delete it.
print("\n.. Deleting secret...")
await client.delete_secret(secret.name)
print("Deleted Secret with name '{0}'".format(secret.name))
# In future, if the secret is required again, we can use the backup value to restore it in the Key Vault.
print("\n.. Restore the secret using the backed up secret bytes")
secret = await client.restore_secret_backup(secret_backup)
print("Restored Secret with name '{0}'".format(secret.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| true | true |
f7fdc756cae32f809030a4d54be7f39f3dad94a3 | 2,376 | py | Python | src/data/modelling_movement.py | Albert-GM/TFM | 2574f7cd1411ff253d045c7dff894de36659aae8 | [
"MIT"
] | null | null | null | src/data/modelling_movement.py | Albert-GM/TFM | 2574f7cd1411ff253d045c7dff894de36659aae8 | [
"MIT"
] | null | null | null | src/data/modelling_movement.py | Albert-GM/TFM | 2574f7cd1411ff253d045c7dff894de36659aae8 | [
"MIT"
] | null | null | null | # =============================================================================
# Makes a model simulating the movement of people between countries.
# =============================================================================
import pandas as pd
import numpy as np
import networkx as nx
import os
import re
root_project = re.findall(r'(^\S*TFM)', os.getcwd())[0]
df = pd.read_pickle(f"{root_project}/data/interim/country_info_nonans.pickle")
df_move = df.loc[:, ['country_name', 'country_code']]
# Arrivals and departures by country and day
df_move['arrivals/day'] = df['arrivals'] / 365
df_move['departures/day'] = df['departures'] / 365
# Ratio of arrivals to total by country
df_move['prop_arrivals'] = df_move['arrivals/day'] / \
np.sum(df_move['arrivals/day'])
countrycode_to_proparriv = pd.Series(
df_move['prop_arrivals'].values, index=df_move['country_code']).to_dict()
countrycode_to_departures = pd.Series(
df_move['departures/day'].values, index=df_move['country_code']).to_dict()
# Add to the dataframe a column with info about the number of people going from
# one country to another
l_people = []
df_people = df.copy()
for country in df.iterrows():
# Possibles destinations of country
country_destinations = country[1]['destinations']
# Compute probabilities of going to each of destinations
prob = {x: countrycode_to_proparriv[x] for x in country_destinations}
sum_prob = np.sum(list(prob.values()))
# Probabilities of going to each of destinations normalized. sum=1
prob = {k: v / sum_prob for k, v in prob.items()}
# Compute individuals going from country to destinations
people = {k: int(round(
v * countrycode_to_departures[country[1]['country_code']], 0))
for k, v in prob.items()}
l_people.append(people)
df['departures/day'] = l_people
df.drop('destinations', axis=1, inplace=True)
# Make origin-destination matrix from graph
H = nx.DiGraph()
for index, country in df.iterrows():
destinations = country['departures/day']
for k, v in destinations.items():
H.add_edge(country['country_code'], k, people=v)
OD_matrix = nx.attr_matrix(H, edge_attr='people', rc_order=df['country_code'])
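# Sanity-check sketch (not part of the original script): row i of OD_matrix
# holds the travellers leaving country i for each destination, so its row sums
# should approximate the per-country departures/day figures up to the rounding
# applied above, e.g.
# row_sums = np.asarray(OD_matrix).sum(axis=1)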
# Uncomment to save new data
# df.to_pickle(f"{root_project}/data/interim/country_info_final.pickle")
# np.save(f"{root_project}/data/interim/od_matrix.npy", OD_matrix)
| 36 | 79 | 0.673401 |
import pandas as pd
import numpy as np
import networkx as nx
import os
import re
root_project = re.findall(r'(^\S*TFM)', os.getcwd())[0]
df = pd.read_pickle(f"{root_project}/data/interim/country_info_nonans.pickle")
df_move = df.loc[:, ['country_name', 'country_code']]
df_move['arrivals/day'] = df['arrivals'] / 365
df_move['departures/day'] = df['departures'] / 365
df_move['prop_arrivals'] = df_move['arrivals/day'] / \
np.sum(df_move['arrivals/day'])
countrycode_to_proparriv = pd.Series(
df_move['prop_arrivals'].values, index=df_move['country_code']).to_dict()
countrycode_to_departures = pd.Series(
df_move['departures/day'].values, index=df_move['country_code']).to_dict()
l_people = []
df_people = df.copy()
for country in df.iterrows():
country_destinations = country[1]['destinations']
prob = {x: countrycode_to_proparriv[x] for x in country_destinations}
sum_prob = np.sum(list(prob.values()))
prob = {k: v / sum_prob for k, v in prob.items()}
people = {k: int(round(
v * countrycode_to_departures[country[1]['country_code']], 0))
for k, v in prob.items()}
l_people.append(people)
df['departures/day'] = l_people
df.drop('destinations', axis=1, inplace=True)
H = nx.DiGraph()
for index, country in df.iterrows():
destinations = country['departures/day']
for k, v in destinations.items():
H.add_edge(country['country_code'], k, people=v)
OD_matrix = nx.attr_matrix(H, edge_attr='people', rc_order=df['country_code'])
| true | true |
f7fdc76521540ed1826fa0bcaaaa7dd3534f1785 | 8,991 | py | Python | src/settings.py | roadlit/vandal | 20ca417876e7052fd5b40b381eafca234cbb0bf8 | [
"BSD-3-Clause"
] | 109 | 2017-12-09T23:49:36.000Z | 2022-03-10T11:52:54.000Z | src/settings.py | lukerQuant/vandal | a7160969494263bc5989ccb4ccff3defea23fe45 | [
"BSD-3-Clause"
] | 46 | 2017-12-09T06:06:49.000Z | 2022-01-07T13:35:18.000Z | src/settings.py | lukerQuant/vandal | a7160969494263bc5989ccb4ccff3defea23fe45 | [
"BSD-3-Clause"
] | 39 | 2017-12-08T11:13:43.000Z | 2022-02-28T09:07:27.000Z | # BSD 3-Clause License
#
# Copyright (c) 2016, 2017, The University of Sydney. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""settings.py: dataflow analysis settings.
The user can change these settings in bin/config.ini or by providing command
line flags to override them.
Default settings are stored in src/default_config.ini.
max_iterations:
The maximum number of graph analysis iterations.
Lower is faster, but potentially less precise.
A negative value means no limit. No limit by default.
bailout_seconds:
Begin to terminate the analysis loop if it's looking to take more time
than specified. Bailing out early may mean the analysis is not able
to reach a fixed-point, so the results may be less precise.
This is not a hard cap, as subsequent analysis steps are required,
and at least one iteration will always be performed.
A negative value means no cap on the running time.
No cap by default.
remove_unreachable:
Upon completion of the analysis, if there are blocks unreachable from the
contract root, remove them. False by default.
merge_unreachable:
Upon completion of the analysis, if there are blocks unreachable from the
contract root, merge them. True by default.
die_on_empty_pop:
Raise an exception if an empty stack is popped. False by default.
skip_stack_on_overflow:
    Do not apply changes to exit stacks after a symbolic overflow occurs
in their blocks. True by default.
reinit_stacks:
Reinitialise all blocks' exit stacks to be empty. True by default.
hook_up_stack_vars:
After completing the analysis, propagate entry stack values into blocks.
True by default.
hook_up_jumps:
Connect any new edges that can be inferred after performing the analysis.
True by default.
mutate_jumps:
JUMPIs with known conditions become JUMPs (or are deleted).
For example, a JUMPI with a known-true condition becomes a JUMP.
False by default.
generate_throws:
JUMP and JUMPI instructions with invalid destinations become THROW and
THROWIs. False by default.
final_mutate_jumps:
Mutate jumps in the final analysis phase. False by default.
final_generate_throws:
    Generate throws in the final analysis phase. True by default.
mutate_blockwise:
Hook up stack vars and/or hook up jumps after each block rather than after
the whole analysis is complete. True by default.
clamp_large_stacks:
If stacks start growing deeper without more of the program's control flow
graph being inferred for sufficiently many iterations, we can freeze the
maximum stack size in order to save computation.
True by default.
clamp_stack_minimum:
Stack sizes will not be clamped smaller than this value. Default value is 20.
widen_variables:
If any computed variable's number of possible values exceeds a given
threshold, widen its value to Top. True by default.
widen_threshold:
Whenever the result of an operation may take more than this number of
possible values, then widen the result variable's value to the Top lattice
value (treat its value as unconstrained).
Default value is 10.
set_valued_ops:
If True, apply arithmetic operations to variables with multiple values;
otherwise, only apply them to variables whose value takes only one
value.
Disable to gain speed at the cost of precision. True by default.
analytics:
If True, dataflow analysis will return a dict of information about
the contract, otherwise return an empty dict.
Disabling this might yield a slight speed improvement. False by default.
extract_functions:
If True, attempt to extract solidity functions.
mark_functions:
If true, tag block names with the function(s) they belong to.
strict:
If true, then unrecognised opcodes and invalid disassembly
will not be skipped, but will result in an error.
Note: If we have already reached complete information about our stack CFG
structure and stack states, we can use die_on_empty_pop and reinit_stacks
to discover places where empty stack exceptions will be thrown.
"""
# The settings - these are None until initialised by import_config
max_iterations = None
bailout_seconds = None
remove_unreachable = None
merge_unreachable = None
die_on_empty_pop = None
skip_stack_on_overflow = None
reinit_stacks = None
hook_up_stack_vars = None
hook_up_jumps = None
mutate_jumps = None
generate_throws = None
final_mutate_jumps = None
final_generate_throws = None
mutate_blockwise = None
clamp_large_stacks = None
clamp_stack_minimum = None
widen_variables = None
widen_threshold = None
set_valued_ops = None
analytics = None
extract_functions = None
mark_functions = None
strict = None
# A reference to this module for retrieving its members; import sys like this so that it does not appear in _names_.
_module_ = __import__("sys").modules[__name__]
# The names of all the settings defined above.
_names_ = [s for s in dir(_module_) if not (s.startswith("_"))]
# Set up the types of the various settings, so they can be converted
# correctly when being read from config.
_types_ = {n: ("int" if n in ["max_iterations", "bailout_seconds",
"clamp_stack_minimum", "widen_threshold"]
else "bool") for n in _names_}
# A stack for saving and restoring setting configurations.
_stack_ = []
# Imports and definitions appearing below the definition of _names_
# do not appear in that list, by design. Don't move them up.
import logging
import sys
from os.path import dirname, normpath, join
_dir_ = dirname(__file__)
# Default settings are stored here.
_DEFAULT_LOC_ = normpath(join(_dir_, "../src/default_config.ini"))
# User settings are located here, and will override default settings.
_CONFIG_LOC_ = normpath(join(_dir_, "../bin/config.ini"))
def _get_dict_():
"""
Return the current module's dictionary of members so the settings can be
dynamically accessed by name.
"""
return _module_.__dict__
def save():
"""Push the current setting configuration to the stack."""
sd = _get_dict_()
_stack_.append({n: sd[n] for n in _names_})
def restore():
"""Restore the setting configuration from the top of the stack."""
_get_dict_().update(_stack_.pop())
def set_from_string(setting_name: str, value: str):
"""
Assign to the named setting the given value, first converting that value
to the type appropriate for that setting.
Names and values are not case sensitive.
"""
name = setting_name.lower()
val = value.lower()
if name not in _names_:
logging.error('Unrecognised setting "%s".', setting_name)
sys.exit(1)
if _types_[name] == "int":
_get_dict_()[name] = int(val)
elif _types_[name] == "bool":
if val in {"1", "yes", "true", "on"}:
_get_dict_()[name] = True
elif val in {"0", "no", "false", "off"}:
_get_dict_()[name] = False
else:
logging.error('Cannot interpret value "%s" as boolean for setting "%s"',
value, setting_name)
sys.exit(1)
    else:
        logging.error('Unknown type "%s" for setting "%s".',
                      _types_[name], setting_name)
        sys.exit(1)
def import_config(filepath: str = _CONFIG_LOC_):
"""
Import settings from the given configuration file.
This should be called before running the decompiler.
"""
import configparser
config = configparser.ConfigParser()
with open(_DEFAULT_LOC_) as default:
config.read_file(default)
config.read(filepath)
for name in _names_:
set_from_string(name, config.get("settings", name))
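# Example (sketch): typical use from a driver script, assuming this module is
# importable as `settings`. The setting name and value are illustrative.
#
#   import settings
#   settings.import_config()                       # defaults + bin/config.ini
#   settings.save()                                # snapshot current values
#   settings.set_from_string("max_iterations", "50")
#   ...                                            # run the analysis
#   settings.restore()                             # roll back to the snapshot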
# --- geemap/geemap.py (mtoqeerpk/geemap, MIT) ---
"""Main module for interactive mapping using Google Earth Engine Python API and ipyleaflet.
Keep in mind that Earth Engine functions use both camel case and snake case, such as setOptions(), setCenter(), centerObject(), addLayer().
ipyleaflet functions use snake case, such as add_tile_layer(), add_wms_layer(), add_minimap().
"""
import colour
import ee
import geocoder
import ipyleaflet
import math
import os
import time
import ipywidgets as widgets
from bqplot import pyplot as plt
from ipyfilechooser import FileChooser
from ipyleaflet import *
from ipytree import Tree, Node
from IPython.display import display
from .basemaps import ee_basemaps
from .conversion import *
from .legends import builtin_legends
def ee_initialize(token_name='EARTHENGINE_TOKEN'):
"""Authenticates Earth Engine and initialize an Earth Engine session
"""
try:
ee_token = os.environ.get(token_name)
if ee_token is not None:
credential = '{"refresh_token":"%s"}' % ee_token
credential_file_path = os.path.expanduser("~/.config/earthengine/")
os.makedirs(credential_file_path, exist_ok=True)
with open(credential_file_path + 'credentials', 'w') as file:
file.write(credential)
elif in_colab_shell():
if credentials_in_drive() and (not credentials_in_colab()):
copy_credentials_to_colab()
            elif not credentials_in_colab():
ee.Authenticate()
if is_drive_mounted() and (not credentials_in_drive()):
copy_credentials_to_drive()
else:
if is_drive_mounted():
copy_credentials_to_drive()
ee.Initialize()
except:
ee.Authenticate()
ee.Initialize()
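# Example (sketch): initializing Earth Engine before building a map, assuming
# the package exposes this module as `geemap`. EARTHENGINE_TOKEN is the
# default token name used above.
#
#   import geemap
#   geemap.ee_initialize()
#   Map = geemap.Map(center=(40, -100), zoom=4)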
class Map(ipyleaflet.Map):
"""The Map class inherits from ipyleaflet.Map. The arguments you can pass to the Map can be found at https://ipyleaflet.readthedocs.io/en/latest/api_reference/map.html. By default, the Map will add Google Maps as the basemap. Set add_google_map = False to use OpenStreetMap as the basemap.
Returns:
object: ipyleaflet map object.
"""
def __init__(self, **kwargs):
# Authenticates Earth Engine and initializes an Earth Engine session
ee_initialize()
# Default map center location and zoom level
latlon = [40, -100]
zoom = 4
# Interchangeable parameters between ipyleaflet and folium
if 'location' in kwargs.keys():
kwargs['center'] = kwargs['location']
kwargs.pop('location')
if 'center' in kwargs.keys():
latlon = kwargs['center']
else:
kwargs['center'] = latlon
if 'zoom_start' in kwargs.keys():
kwargs['zoom'] = kwargs['zoom_start']
kwargs.pop('zoom_start')
if 'zoom' in kwargs.keys():
zoom = kwargs['zoom']
else:
kwargs['zoom'] = zoom
if 'add_google_map' not in kwargs.keys():
kwargs['add_google_map'] = True
if 'show_attribution' not in kwargs.keys():
kwargs['show_attribution'] = True
if 'scroll_wheel_zoom' not in kwargs.keys():
kwargs['scroll_wheel_zoom'] = True
if 'zoom_control' not in kwargs.keys():
kwargs['zoom_control'] = True
if 'height' not in kwargs.keys():
kwargs['height'] = '550px'
# Inherits the ipyleaflet Map class
super().__init__(**kwargs)
self.layout.height = kwargs['height']
self.clear_controls()
self.draw_count = 0 # The number of shapes drawn by the user using the DrawControl
# The list of Earth Engine Geometry objects converted from geojson
self.draw_features = []
# The Earth Engine Geometry object converted from the last drawn feature
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.roi_start = False
self.roi_end = False
self.roi_reducer = ee.Reducer.mean()
self.roi_reducer_scale = None
# List for storing pixel values and locations based on user-drawn geometries.
self.chart_points = []
self.chart_values = []
self.chart_labels = None
self.plot_widget = None # The plot widget for plotting Earth Engine data
self.plot_control = None # The plot control for interacting plotting
self.random_marker = None
self.legend_widget = None
self.legend_control = None
self.ee_layers = []
self.ee_layer_names = []
self.ee_raster_layers = []
self.ee_raster_layer_names = []
self.ee_layer_dict = {}
self.search_locations = None
self.search_loc_marker = None
self.search_loc_geom = None
self.search_datasets = None
self.screenshot = None
self.toolbar = None
self.toolbar_button = None
# Adds search button and search box
search_button = widgets.ToggleButton(
value=False,
tooltip='Search location/data',
icon='globe'
)
search_button.layout.width = '36px'
search_type = widgets.ToggleButtons(
options=['name/address', 'lat-lon', 'data'],
tooltips=['Search by place name or address',
'Search by lat-lon coordinates', 'Search Earth Engine data catalog']
)
search_type.style.button_width = '110px'
search_box = widgets.Text(
placeholder='Search by place name or address',
tooltip='Search location',
)
search_box.layout.width = '340px'
search_output = widgets.Output(
layout={'max_width': '340px', 'max_height': '250px', 'overflow': 'scroll'})
search_results = widgets.RadioButtons()
assets_dropdown = widgets.Dropdown()
assets_dropdown.layout.min_width = '279px'
assets_dropdown.layout.max_width = '279px'
assets_dropdown.options = []
import_btn = widgets.Button(
description='import',
button_style='primary',
tooltip='Click to import the selected asset',
)
import_btn.layout.min_width = '57px'
import_btn.layout.max_width = '57px'
def import_btn_clicked(b):
if assets_dropdown.value != '':
datasets = self.search_datasets
dataset = datasets[assets_dropdown.index]
dataset_uid = 'dataset_' + random_string(string_length=3)
line1 = '{} = {}\n'.format(
dataset_uid, dataset['ee_id_snippet'])
line2 = 'Map.addLayer(' + dataset_uid + \
', {}, "' + dataset['id'] + '")'
contents = ''.join([line1, line2])
create_code_cell(contents)
import_btn.on_click(import_btn_clicked)
html_widget = widgets.HTML()
def dropdown_change(change):
dropdown_index = assets_dropdown.index
if dropdown_index is not None and dropdown_index >= 0:
with search_output:
search_output.clear_output(wait=True)
print('Loading ...')
datasets = self.search_datasets
dataset = datasets[dropdown_index]
dataset_html = ee_data_html(dataset)
html_widget.value = dataset_html
search_output.clear_output(wait=True)
display(html_widget)
assets_dropdown.observe(dropdown_change, names='value')
assets_combo = widgets.HBox()
assets_combo.children = [import_btn, assets_dropdown]
def search_result_change(change):
result_index = search_results.index
locations = self.search_locations
location = locations[result_index]
latlon = (location.lat, location.lng)
self.search_loc_geom = ee.Geometry.Point(
location.lng, location.lat)
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.observe(search_result_change, names='value')
def search_btn_click(change):
if change['new']:
search_widget.children = [search_button, search_result_widget]
else:
search_widget.children = [search_button]
search_result_widget.children = [search_type, search_box]
search_button.observe(search_btn_click, 'value')
def search_type_changed(change):
search_box.value = ''
search_output.clear_output()
if change['new'] == 'name/address':
search_box.placeholder = 'Search by place name or address, e.g., Paris'
assets_dropdown.options = []
search_result_widget.children = [
search_type, search_box, search_output]
elif change['new'] == 'lat-lon':
search_box.placeholder = 'Search by lat-lon, e.g., 40, -100'
assets_dropdown.options = []
search_result_widget.children = [
search_type, search_box, search_output]
elif change['new'] == 'data':
search_box.placeholder = 'Search GEE data catalog by keywords, e.g., elevation'
search_result_widget.children = [
search_type, search_box, assets_combo, search_output]
search_type.observe(search_type_changed, names='value')
def search_box_callback(text):
if text.value != '':
if search_type.value == 'name/address':
g = geocode(text.value)
elif search_type.value == 'lat-lon':
g = geocode(text.value, reverse=True)
if g is None and latlon_from_text(text.value):
search_output.clear_output()
latlon = latlon_from_text(text.value)
self.search_loc_geom = ee.Geometry.Point(
latlon[1], latlon[0])
if self.search_loc_marker is None:
marker = Marker(
location=latlon, draggable=False, name='Search location')
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
with search_output:
print('No address found for {}'.format(latlon))
return
elif search_type.value == 'data':
search_output.clear_output()
with search_output:
print('Searching ...')
self.default_style = {'cursor': 'wait'}
ee_assets = search_ee_data(text.value)
self.search_datasets = ee_assets
asset_titles = [x['title'] for x in ee_assets]
assets_dropdown.options = asset_titles
search_output.clear_output()
if len(ee_assets) > 0:
html_widget.value = ee_data_html(ee_assets[0])
with search_output:
display(html_widget)
self.default_style = {'cursor': 'default'}
return
self.search_locations = g
if g is not None and len(g) > 0:
top_loc = g[0]
latlon = (top_loc.lat, top_loc.lng)
self.search_loc_geom = ee.Geometry.Point(
top_loc.lng, top_loc.lat)
if self.search_loc_marker is None:
marker = Marker(
location=latlon, draggable=False, name='Search location')
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.options = [x.address for x in g]
search_result_widget.children = [
search_type, search_box, search_output]
with search_output:
search_output.clear_output(wait=True)
display(search_results)
else:
with search_output:
search_output.clear_output()
print('No results could be found.')
search_box.on_submit(search_box_callback)
search_result_widget = widgets.VBox()
search_result_widget.children = [search_type, search_box]
search_widget = widgets.HBox()
search_widget.children = [search_button]
data_control = WidgetControl(
widget=search_widget, position='topleft')
self.add_control(control=data_control)
search_marker = Marker(icon=AwesomeIcon(
name="check", marker_color='green', icon_color='darkgreen'))
search = SearchControl(position="topleft",
url='https://nominatim.openstreetmap.org/search?format=json&q={s}',
zoom=5,
property_name='display_name',
marker=search_marker
)
self.add_control(search)
if kwargs['zoom_control']:
self.add_control(ZoomControl(position='topleft'))
layer_control = LayersControl(position='topright')
self.add_control(layer_control)
self.layer_control = layer_control
scale = ScaleControl(position='bottomleft')
self.add_control(scale)
self.scale_control = scale
fullscreen = FullScreenControl()
self.add_control(fullscreen)
self.fullscreen_control = fullscreen
measure = MeasureControl(
position='bottomleft',
active_color='orange',
primary_length_unit='kilometers'
)
self.add_control(measure)
self.measure_control = measure
if kwargs.get('add_google_map'):
self.add_layer(ee_basemaps['ROADMAP'])
if kwargs.get('show_attribution'):
self.add_control(AttributionControl(position='bottomright'))
draw_control = DrawControl(marker={'shapeOptions': {'color': '#0000FF'}},
rectangle={'shapeOptions': {
'color': '#0000FF'}},
circle={'shapeOptions': {
'color': '#0000FF'}},
circlemarker={},
)
draw_control_lite = DrawControl(marker={},
rectangle={'shapeOptions': {
'color': '#0000FF'}},
circle={'shapeOptions': {
'color': '#0000FF'}},
circlemarker={},
polyline={},
polygon={}
)
# Handles draw events
def handle_draw(target, action, geo_json):
try:
# print(geo_json)
# geo_json = adjust_longitude(geo_json)
# print(geo_json)
self.roi_start = True
self.draw_count += 1
geom = geojson_to_ee(geo_json, False)
self.user_roi = geom
feature = ee.Feature(geom)
self.draw_last_json = geo_json
self.draw_last_bounds = minimum_bounding_box(geo_json)
self.draw_last_feature = feature
self.draw_features.append(feature)
collection = ee.FeatureCollection(self.draw_features)
self.user_rois = collection
ee_draw_layer = ee_tile_layer(
collection, {'color': 'blue'}, 'Drawn Features', True, 0.5)
if self.draw_count == 1:
self.add_layer(ee_draw_layer)
self.draw_layer = ee_draw_layer
else:
self.substitute_layer(self.draw_layer, ee_draw_layer)
self.draw_layer = ee_draw_layer
draw_control.clear()
self.roi_end = True
self.roi_start = False
except Exception as e:
print(e)
print("There was an error creating Earth Engine Feature.")
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.user_roi = None
self.roi_start = False
self.roi_end = False
draw_control.on_draw(handle_draw)
self.add_control(draw_control)
self.draw_control = draw_control
self.draw_control_lite = draw_control_lite
# Dropdown widget for plotting
self.plot_dropdown_control = None
self.plot_dropdown_widget = None
self.plot_options = {}
self.plot_marker_cluster = MarkerCluster(name="Marker Cluster")
self.plot_coordinates = []
self.plot_markers = []
self.plot_last_click = []
self.plot_all_clicks = []
# Adds Inspector widget
inspector_checkbox = widgets.Checkbox(
value=False,
description='Inspector',
indent=False,
layout=widgets.Layout(height='18px')
)
inspector_checkbox.layout.width = '13ex'
# Adds Plot widget
plot_checkbox = widgets.Checkbox(
value=False,
description='Plotting',
indent=False,
)
plot_checkbox.layout.width = '13ex'
self.plot_checkbox = plot_checkbox
vb = widgets.VBox(children=[inspector_checkbox, plot_checkbox])
chk_control = WidgetControl(widget=vb, position='topright')
self.add_control(chk_control)
self.inspector_control = chk_control
self.inspector_checked = inspector_checkbox.value
self.plot_checked = plot_checkbox.value
def inspect_chk_changed(b):
self.inspector_checked = inspector_checkbox.value
if not self.inspector_checked:
output.clear_output()
inspector_checkbox.observe(inspect_chk_changed)
output = widgets.Output(layout={'border': '1px solid black'})
output_control = WidgetControl(widget=output, position='topright')
self.add_control(output_control)
def plot_chk_changed(button):
if button['name'] == 'value' and button['new']:
self.plot_checked = True
plot_dropdown_widget = widgets.Dropdown(
options=list(self.ee_raster_layer_names),
)
plot_dropdown_widget.layout.width = '18ex'
self.plot_dropdown_widget = plot_dropdown_widget
plot_dropdown_control = WidgetControl(
widget=plot_dropdown_widget, position='topright')
self.plot_dropdown_control = plot_dropdown_control
self.add_control(plot_dropdown_control)
self.remove_control(self.draw_control)
self.add_control(self.draw_control_lite)
elif button['name'] == 'value' and (not button['new']):
self.plot_checked = False
plot_dropdown_widget = self.plot_dropdown_widget
plot_dropdown_control = self.plot_dropdown_control
self.remove_control(plot_dropdown_control)
del plot_dropdown_widget
del plot_dropdown_control
if self.plot_control in self.controls:
plot_control = self.plot_control
plot_widget = self.plot_widget
self.remove_control(plot_control)
self.plot_control = None
self.plot_widget = None
del plot_control
del plot_widget
if self.plot_marker_cluster is not None and self.plot_marker_cluster in self.layers:
self.remove_layer(self.plot_marker_cluster)
self.remove_control(self.draw_control_lite)
self.add_control(self.draw_control)
plot_checkbox.observe(plot_chk_changed)
tool_output = widgets.Output()
tool_output.clear_output(wait=True)
save_map_widget = widgets.VBox()
save_type = widgets.ToggleButtons(
options=['HTML', 'PNG', 'JPG'],
tooltips=['Save the map as an HTML file',
'Take a screenshot and save as a PNG file',
'Take a screenshot and save as a JPG file']
)
# download_dir = os.getcwd()
file_chooser = FileChooser(os.getcwd())
file_chooser.default_filename = 'my_map.html'
file_chooser.use_dir_icons = False
ok_cancel = widgets.ToggleButtons(
options=['OK', 'Cancel'],
tooltips=['OK', 'Cancel'],
button_style='primary'
)
ok_cancel.value = None
def save_type_changed(change):
ok_cancel.value = None
# file_chooser.reset()
file_chooser.default_path = os.getcwd()
if change['new'] == 'HTML':
file_chooser.default_filename = 'my_map.html'
elif change['new'] == 'PNG':
file_chooser.default_filename = 'my_map.png'
elif change['new'] == 'JPG':
file_chooser.default_filename = 'my_map.jpg'
save_map_widget.children = [save_type, file_chooser]
def chooser_callback(chooser):
# file_chooser.default_path = os.getcwd()
save_map_widget.children = [save_type, file_chooser, ok_cancel]
def ok_cancel_clicked(change):
if change['new'] == 'OK':
file_path = file_chooser.selected
ext = os.path.splitext(file_path)[1]
if save_type.value == 'HTML' and ext.upper() == '.HTML':
tool_output.clear_output()
self.to_html(file_path)
elif save_type.value == 'PNG' and ext.upper() == '.PNG':
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
elif save_type.value == 'JPG' and ext.upper() == '.JPG':
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
else:
label = widgets.Label(
value="The selected file extension does not match the selected exporting type.")
save_map_widget.children = [save_type, file_chooser, label]
self.toolbar_reset()
elif change['new'] == 'Cancel':
tool_output.clear_output()
self.toolbar_reset()
save_type.observe(save_type_changed, names='value')
ok_cancel.observe(ok_cancel_clicked, names='value')
file_chooser.register_callback(chooser_callback)
save_map_widget.children = [save_type, file_chooser]
tools = {
'mouse-pointer': 'pointer',
'camera': 'to_image',
'info': 'identify',
'map-marker': 'plotting'
}
icons = ['mouse-pointer', 'camera', 'info', 'map-marker']
tooltips = ['Default pointer',
'Save map as HTML or image', 'Inspector', 'Plotting']
icon_width = '42px'
icon_height = '40px'
n_cols = 2
n_rows = math.ceil(len(icons) / n_cols)
toolbar_grid = widgets.GridBox(children=[widgets.ToggleButton(layout=widgets.Layout(width='auto', height='auto'),
button_style='primary', icon=icons[i], tooltip=tooltips[i]) for i in range(len(icons))],
layout=widgets.Layout(
width='90px',
grid_template_columns=(icon_width + ' ') * 2,
grid_template_rows=(icon_height + ' ') * n_rows,
grid_gap='1px 1px')
)
self.toolbar = toolbar_grid
def tool_callback(change):
if change['new']:
current_tool = change['owner']
for tool in toolbar_grid.children:
if not tool is current_tool:
tool.value = False
tool = change['owner']
if tools[tool.icon] == 'to_image':
with tool_output:
tool_output.clear_output()
display(save_map_widget)
else:
tool_output.clear_output()
save_map_widget.children = [save_type, file_chooser]
for tool in toolbar_grid.children:
tool.observe(tool_callback, 'value')
toolbar_button = widgets.ToggleButton(
value=False,
tooltip='Toolbar',
icon='wrench'
)
toolbar_button.layout.width = '37px'
self.toolbar_button = toolbar_button
def toolbar_btn_click(change):
if change['new']:
toolbar_widget.children = [toolbar_button, toolbar_grid]
else:
toolbar_widget.children = [toolbar_button]
tool_output.clear_output()
self.toolbar_reset()
toolbar_button.observe(toolbar_btn_click, 'value')
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_control = WidgetControl(
widget=toolbar_widget, position='topright')
self.add_control(toolbar_control)
tool_output_control = WidgetControl(
widget=tool_output, position='topright')
self.add_control(tool_output_control)
def handle_interaction(**kwargs):
latlon = kwargs.get('coordinates')
if kwargs.get('type') == 'click' and self.inspector_checked:
self.default_style = {'cursor': 'wait'}
sample_scale = self.getScale()
layers = self.ee_layers
with output:
output.clear_output(wait=True)
for index, ee_object in enumerate(layers):
xy = ee.Geometry.Point(latlon[::-1])
layer_names = self.ee_layer_names
layer_name = layer_names[index]
object_type = ee_object.__class__.__name__
try:
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) \
or isinstance(ee_object, ee.featurecollection.FeatureCollection):
ee_object = ee.FeatureCollection(ee_object)
if isinstance(ee_object, ee.Image):
item = ee_object.reduceRegion(
ee.Reducer.first(), xy, sample_scale).getInfo()
b_name = 'band'
if len(item) > 1:
b_name = 'bands'
print("{}: {} ({} {})".format(
layer_name, object_type, len(item), b_name))
keys = item.keys()
for key in keys:
print(" {}: {}".format(key, item[key]))
elif isinstance(ee_object, ee.FeatureCollection):
filtered = ee_object.filterBounds(xy)
size = filtered.size().getInfo()
if size > 0:
first = filtered.first()
props = first.toDictionary().getInfo()
b_name = 'property'
if len(props) > 1:
b_name = 'properties'
print("{}: Feature ({} {})".format(
layer_name, len(props), b_name))
keys = props.keys()
for key in keys:
print(" {}: {}".format(
key, props[key]))
except Exception as e:
print(e)
self.default_style = {'cursor': 'crosshair'}
if kwargs.get('type') == 'click' and self.plot_checked and len(self.ee_raster_layers) > 0:
plot_layer_name = self.plot_dropdown_widget.value
layer_names = self.ee_raster_layer_names
layers = self.ee_raster_layers
index = layer_names.index(plot_layer_name)
ee_object = layers[index]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
try:
self.default_style = {'cursor': 'wait'}
plot_options = self.plot_options
sample_scale = self.getScale()
                    if 'sample_scale' in plot_options.keys() and (plot_options['sample_scale'] is not None):
sample_scale = plot_options['sample_scale']
if 'title' not in plot_options.keys():
plot_options['title'] = plot_layer_name
if ('add_marker_cluster' in plot_options.keys()) and plot_options['add_marker_cluster']:
plot_coordinates = self.plot_coordinates
markers = self.plot_markers
marker_cluster = self.plot_marker_cluster
plot_coordinates.append(latlon)
self.plot_last_click = latlon
self.plot_all_clicks = plot_coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
self.plot_marker_cluster = marker_cluster
band_names = ee_object.bandNames().getInfo()
self.chart_labels = band_names
if self.roi_end:
if self.roi_reducer_scale is None:
scale = ee_object.select(
0).projection().nominalScale()
else:
scale = self.roi_reducer_scale
dict_values = ee_object.reduceRegion(
reducer=self.roi_reducer, geometry=self.user_roi, scale=scale, bestEffort=True).getInfo()
self.chart_points.append(
self.user_roi.centroid(1).coordinates().getInfo())
else:
xy = ee.Geometry.Point(latlon[::-1])
dict_values = ee_object.sample(
xy, scale=sample_scale).first().toDictionary().getInfo()
self.chart_points.append(xy.coordinates().getInfo())
band_values = list(dict_values.values())
self.chart_values.append(band_values)
self.plot(band_names, band_values, **plot_options)
if plot_options['title'] == plot_layer_name:
del plot_options['title']
self.default_style = {'cursor': 'crosshair'}
self.roi_end = False
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {'cursor': 'crosshair'}
self.roi_end = False
self.on_interaction(handle_interaction)
def set_options(self, mapTypeId='HYBRID', styles=None, types=None):
"""Adds Google basemap and controls to the ipyleaflet map.
Args:
mapTypeId (str, optional): A mapTypeId to set the basemap to. Can be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN" to select one of the standard Google Maps API map types. Defaults to 'HYBRID'.
styles (object, optional): A dictionary of custom MapTypeStyle objects keyed with a name that will appear in the map's Map Type Controls. Defaults to None.
types (list, optional): A list of mapTypeIds to make available. If omitted, but opt_styles is specified, appends all of the style keys to the standard Google Maps API map types.. Defaults to None.
"""
self.clear_layers()
self.clear_controls()
self.scroll_wheel_zoom = True
self.add_control(ZoomControl(position='topleft'))
self.add_control(LayersControl(position='topright'))
self.add_control(ScaleControl(position='bottomleft'))
self.add_control(FullScreenControl())
self.add_control(DrawControl())
measure = MeasureControl(
position='bottomleft',
active_color='orange',
primary_length_unit='kilometers'
)
self.add_control(measure)
try:
self.add_layer(ee_basemaps[mapTypeId])
except Exception as e:
print(e)
print(
'Google basemaps can only be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN".')
setOptions = set_options
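    # Example (sketch): switching the basemap after construction. 'TERRAIN' is
    # one of the standard Google mapTypeIds accepted above.
    #
    #   m = Map()
    #   m.setOptions('TERRAIN')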
def add_ee_layer(self, ee_object, vis_params={}, name=None, shown=True, opacity=1.0):
"""Adds a given EE object to the map as a layer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer N'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
image = None
if name is None:
layer_count = len(self.layers)
name = 'Layer ' + str(layer_count + 1)
if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):
            err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.ImageCollection, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
features = ee.FeatureCollection(ee_object)
width = 2
if 'width' in vis_params:
width = vis_params['width']
color = '000000'
if 'color' in vis_params:
color = vis_params['color']
image_fill = features.style(
**{'fillColor': color}).updateMask(ee.Image.constant(0.5))
image_outline = features.style(
**{'color': color, 'fillColor': '00000000', 'width': width})
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = ipyleaflet.TileLayer(
url=map_id_dict['tile_fetcher'].url_format,
attribution='Google Earth Engine',
name=name,
opacity=opacity,
visible=True
# visible=shown
)
layer = self.find_layer(name=name)
if layer is not None:
existing_object = self.ee_layer_dict[name]['ee_object']
if isinstance(existing_object, ee.Image) or isinstance(existing_object, ee.ImageCollection):
self.ee_raster_layers.remove(existing_object)
self.ee_raster_layer_names.remove(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(
self.ee_raster_layer_names)
self.ee_layers.remove(existing_object)
self.ee_layer_names.remove(name)
self.remove_layer(layer)
self.ee_layers.append(ee_object)
self.ee_layer_names.append(name)
self.ee_layer_dict[name] = {
'ee_object': ee_object, 'ee_layer': tile_layer}
self.add_layer(tile_layer)
if isinstance(ee_object, ee.Image) or isinstance(ee_object, ee.ImageCollection):
self.ee_raster_layers.append(ee_object)
self.ee_raster_layer_names.append(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(
self.ee_raster_layer_names)
addLayer = add_ee_layer
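    # Example (sketch): adding an Earth Engine image with visualization
    # parameters. The asset ID is the public SRTM DEM; the min/max/palette
    # values are illustrative.
    #
    #   dem = ee.Image('USGS/SRTMGL1_003')
    #   vis = {'min': 0, 'max': 4000,
    #          'palette': ['006633', 'E5FFCC', '662A00']}
    #   m = Map()
    #   m.addLayer(dem, vis, 'SRTM DEM')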
def set_center(self, lon, lat, zoom=None):
"""Centers the map view at a given coordinates with the given zoom level.
Args:
lon (float): The longitude of the center, in degrees.
lat (float): The latitude of the center, in degrees.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.
"""
self.center = (lat, lon)
if zoom is not None:
self.zoom = zoom
setCenter = set_center
def center_object(self, ee_object, zoom=None):
"""Centers the map view on a given object.
Args:
ee_object (Element|Geometry): An Earth Engine object to center on - a geometry, image or feature.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.
"""
lat = 0
lon = 0
bounds = [[lat, lon], [lat, lon]]
if isinstance(ee_object, ee.geometry.Geometry):
centroid = ee_object.centroid(1)
lon, lat = centroid.getInfo()['coordinates']
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.feature.Feature):
centroid = ee_object.geometry().centroid(1)
lon, lat = centroid.getInfo()['coordinates']
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.featurecollection.FeatureCollection):
centroid = ee_object.geometry().centroid()
lon, lat = centroid.getInfo()['coordinates']
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.image.Image):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()['coordinates'][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()['coordinates'][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
else:
bounds = [[0, 0], [0, 0]]
lat = bounds[0][0]
lon = bounds[0][1]
self.setCenter(lon, lat, zoom)
centerObject = center_object
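    # Example (sketch): centering the view on a geometry. The coordinates are
    # arbitrary.
    #
    #   point = ee.Geometry.Point(-122.44, 37.76)
    #   m.centerObject(point, zoom=12)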
def get_scale(self):
"""Returns the approximate pixel scale of the current map view, in meters.
Returns:
float: Map resolution in meters.
"""
        zoom_level = self.zoom
        # Reference: https://blogs.bing.com/maps/2006/02/25/map-control-zoom-levels-gt-resolution
        # Ground resolution varies with latitude, so use the current map
        # center rather than assuming the equator.
        center_lat = self.center[0]
        resolution = 156543.04 * \
            math.cos(math.radians(center_lat)) / math.pow(2, zoom_level)
        return resolution
getScale = get_scale
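    # Worked example: at zoom level 10 with the map centered on the equator
    # the formula gives 156543.04 / 2**10, i.e. about 152.87 m per pixel;
    # centered at latitude 60 the cosine term halves this to roughly 76.44 m.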
def add_basemap(self, basemap='HYBRID'):
"""Adds a basemap to the map.
Args:
basemap (str, optional): Can be one of string from ee_basemaps. Defaults to 'HYBRID'.
"""
try:
self.add_layer(ee_basemaps[basemap])
except Exception as e:
print(e)
print('Basemap can only be one of the following:\n {}'.format(
'\n '.join(ee_basemaps.keys())))
def find_layer(self, name):
"""Finds layer by name
Args:
name (str): Name of the layer to find.
Returns:
object: ipyleaflet layer object.
"""
layers = self.layers
for layer in layers:
if layer.name == name:
return layer
return None
def layer_opacity(self, name, value=1.0):
"""Changes layer opacity.
Args:
name (str): The name of the layer to change opacity.
value (float, optional): The opacity value to set. Defaults to 1.0.
"""
layer = self.find_layer(name)
try:
layer.opacity = value
# layer.interact(opacity=(0, 1, 0.1)) # to change layer opacity interactively
except Exception as e:
print(e)
def add_wms_layer(self, url, layers, name=None, attribution='', format='image/jpeg', transparent=False, opacity=1.0, shown=True):
"""Add a WMS layer to the map.
Args:
url (str): The URL of the WMS web service.
layers (str): Comma-separated list of WMS layers to show.
name (str, optional): The layer name to use on the layer control. Defaults to None.
attribution (str, optional): The attribution of the data layer. Defaults to ''.
format (str, optional): WMS image format (use ‘image/png’ for layers with transparency). Defaults to 'image/jpeg'.
transparent (bool, optional): If True, the WMS service will return images with transparency. Defaults to False.
opacity (float, optional): The opacity of the layer. Defaults to 1.0.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
"""
if name is None:
name = str(layers)
try:
wms_layer = ipyleaflet.WMSLayer(
url=url,
layers=layers,
name=name,
attribution=attribution,
format=format,
transparent=transparent,
opacity=opacity,
visible=True
# visible=shown
)
self.add_layer(wms_layer)
except Exception as e:
print(e)
print("Failed to add the specified WMS TileLayer.")
def add_tile_layer(self, url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', name=None, attribution='', opacity=1.0, shown=True):
"""Adds a TileLayer to the map.
Args:
url (str, optional): The URL of the tile layer. Defaults to 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'.
name (str, optional): The layer name to use for the layer. Defaults to None.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
"""
try:
tile_layer = ipyleaflet.TileLayer(
url=url,
name=name,
attribution=attribution,
opacity=opacity,
visible=True
# visible=shown
)
self.add_layer(tile_layer)
except Exception as e:
print(e)
print("Failed to add the specified TileLayer.")
def add_minimap(self, zoom=5, position="bottomright"):
"""Adds a minimap (overview) to the ipyleaflet map.
Args:
zoom (int, optional): Initial map zoom level. Defaults to 5.
position (str, optional): Position of the minimap. Defaults to "bottomright".
"""
        minimap = ipyleaflet.Map(
            zoom_control=False, attribution_control=False,
            zoom=zoom, center=self.center, layers=[ee_basemaps['ROADMAP']]
        )
minimap.layout.width = '150px'
minimap.layout.height = '150px'
link((minimap, 'center'), (self, 'center'))
minimap_control = WidgetControl(widget=minimap, position=position)
self.add_control(minimap_control)
def marker_cluster(self):
"""Adds a marker cluster to the map and returns a list of ee.Feature, which can be accessed using Map.ee_marker_cluster.
Returns:
object: a list of ee.Feature
"""
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.ee_markers = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get('coordinates')
if kwargs.get('type') == 'click':
coordinates.append(latlon)
geom = ee.Geometry.Point(latlon[1], latlon[0])
feature = ee.Feature(geom)
self.ee_markers.append(feature)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get('type') == 'mousemove':
pass
# cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp
self.default_style = {'cursor': 'crosshair'}
self.on_interaction(handle_interaction)
def set_plot_options(self, add_marker_cluster=False, sample_scale=None, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
"""Sets plotting options.
Args:
add_marker_cluster (bool, optional): Whether to add a marker cluster. Defaults to False.
sample_scale (float, optional): A nominal scale in meters of the projection to sample in . Defaults to None.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
plot_options_dict = {}
plot_options_dict['add_marker_cluster'] = add_marker_cluster
plot_options_dict['sample_scale'] = sample_scale
plot_options_dict['plot_type'] = plot_type
plot_options_dict['overlay'] = overlay
plot_options_dict['position'] = position
plot_options_dict['min_width'] = min_width
plot_options_dict['max_width'] = max_width
plot_options_dict['min_height'] = min_height
plot_options_dict['max_height'] = max_height
for key in kwargs.keys():
plot_options_dict[key] = kwargs[key]
self.plot_options = plot_options_dict
if add_marker_cluster and (self.plot_marker_cluster not in self.layers):
self.add_layer(self.plot_marker_cluster)
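    # Example (sketch): enabling interactive plotting with a marker cluster
    # and a fixed 30 m sampling scale. The values are illustrative.
    #
    #   m.set_plot_options(add_marker_cluster=True, sample_scale=30,
    #                      plot_type='bar')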
def plot(self, x, y, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
"""Creates a plot based on x-array and y-array data.
Args:
x (numpy.ndarray or list): The x-coordinates of the plotted line.
y (numpy.ndarray or list): The y-coordinates of the plotted line.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
if self.plot_widget is not None:
plot_widget = self.plot_widget
else:
plot_widget = widgets.Output(layout={'border': '1px solid black'})
plot_control = WidgetControl(widget=plot_widget, position=position, min_width=min_width,
max_width=max_width, min_height=min_height, max_height=max_height)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
if max_width is None:
max_width = 500
if max_height is None:
max_height = 300
        if (plot_type is None) and ('marker' not in kwargs.keys()):
            kwargs['marker'] = 'circle'
with plot_widget:
try:
fig = plt.figure(1, **kwargs)
if max_width is not None:
fig.layout.width = str(max_width) + 'px'
if max_height is not None:
fig.layout.height = str(max_height) + 'px'
plot_widget.clear_output(wait=True)
if not overlay:
plt.clear()
if plot_type is None:
if 'marker' not in kwargs.keys():
kwargs['marker'] = 'circle'
plt.plot(x, y, **kwargs)
elif plot_type == 'bar':
plt.bar(x, y, **kwargs)
elif plot_type == 'scatter':
plt.scatter(x, y, **kwargs)
elif plot_type == 'hist':
plt.hist(y, **kwargs)
plt.show()
except Exception as e:
print(e)
print("Failed to create plot.")
def plot_demo(self, iterations=20, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
"""A demo of interactive plotting using random pixel coordinates.
Args:
iterations (int, optional): How many iterations to run for the demo. Defaults to 20.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
import numpy as np
import time
if self.random_marker is not None:
self.remove_layer(self.random_marker)
image = ee.Image('LE7_TOA_5YEAR/1999_2003').select([0, 1, 2, 3, 4, 6])
self.addLayer(
image, {'bands': ['B4', 'B3', 'B2'], 'gamma': 1.4}, "LE7_TOA_5YEAR/1999_2003")
self.setCenter(-50.078877, 25.190030, 3)
band_names = image.bandNames().getInfo()
band_count = len(band_names)
latitudes = np.random.uniform(30, 48, size=iterations)
longitudes = np.random.uniform(-121, -76, size=iterations)
marker = Marker(location=(0, 0))
self.random_marker = marker
self.add_layer(marker)
for i in range(iterations):
try:
coordinate = ee.Geometry.Point([longitudes[i], latitudes[i]])
dict_values = image.sample(
coordinate).first().toDictionary().getInfo()
band_values = list(dict_values.values())
title = '{}/{}: Spectral signature at ({}, {})'.format(i+1, iterations,
round(latitudes[i], 2), round(longitudes[i], 2))
marker.location = (latitudes[i], longitudes[i])
self.plot(band_names, band_values, plot_type=plot_type, overlay=overlay,
min_width=min_width, max_width=max_width, min_height=min_height, max_height=max_height, title=title, **kwargs)
time.sleep(0.3)
except Exception as e:
print(e)
def plot_raster(self, ee_object=None, sample_scale=None, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
"""Interactive plotting of Earth Engine data by clicking on the map.
Args:
ee_object (object, optional): The ee.Image or ee.ImageCollection to sample. Defaults to None.
sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
if self.plot_control is not None:
del self.plot_widget
self.remove_control(self.plot_control)
if self.random_marker is not None:
self.remove_layer(self.random_marker)
plot_widget = widgets.Output(layout={'border': '1px solid black'})
plot_control = WidgetControl(widget=plot_widget, position=position, min_width=min_width,
max_width=max_width, min_height=min_height, max_height=max_height)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
self.default_style = {'cursor': 'crosshair'}
msg = "The plot function can only be used on ee.Image or ee.ImageCollection with more than one band."
if (ee_object is None) and len(self.ee_raster_layers) > 0:
ee_object = self.ee_raster_layers[-1]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif not isinstance(ee_object, ee.Image):
print(msg)
return
if sample_scale is None:
sample_scale = self.getScale()
if max_width is None:
max_width = 500
band_names = ee_object.bandNames().getInfo()
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs2):
latlon = kwargs2.get('coordinates')
if kwargs2.get('type') == 'click':
try:
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
self.default_style = {'cursor': 'wait'}
xy = ee.Geometry.Point(latlon[::-1])
dict_values = ee_object.sample(
xy, scale=sample_scale).first().toDictionary().getInfo()
band_values = list(dict_values.values())
self.plot(band_names, band_values, plot_type=plot_type, overlay=overlay,
min_width=min_width, max_width=max_width, min_height=min_height, max_height=max_height, **kwargs)
self.default_style = {'cursor': 'crosshair'}
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {'cursor': 'crosshair'}
self.on_interaction(handle_interaction)
def add_maker_cluster(self, event='click', add_marker=True):
"""Captures user inputs and add markers to the map.
Args:
event (str, optional): [description]. Defaults to 'click'.
add_marker (bool, optional): If True, add markers to the map. Defaults to True.
Returns:
object: a marker cluster.
"""
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
if add_marker:
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get('coordinates')
if event == 'click' and kwargs.get('type') == 'click':
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
if add_marker:
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get('type') == 'mousemove':
pass
# cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp
self.default_style = {'cursor': 'crosshair'}
self.on_interaction(handle_interaction)
def set_control_visibility(self, layerControl=True, fullscreenControl=True, latLngPopup=True):
"""Sets the visibility of the controls on the map.
Args:
layerControl (bool, optional): Whether to show the control that allows the user to toggle layers on/off. Defaults to True.
fullscreenControl (bool, optional): Whether to show the control that allows the user to make the map full-screen. Defaults to True.
latLngPopup (bool, optional): Whether to show the control that pops up the Lat/lon when the user clicks on the map. Defaults to True.
"""
pass
setControlVisibility = set_control_visibility
def add_layer_control(self):
"""Adds the layer control to the map.
"""
pass
addLayerControl = add_layer_control
def split_map(self, left_layer='HYBRID', right_layer='ESRI'):
"""Adds split map.
Args:
            left_layer (str, optional): The left tile layer. Defaults to 'HYBRID'.
right_layer (str, optional): The right tile layer. Defaults to 'ESRI'.
"""
try:
self.remove_control(self.layer_control)
self.remove_control(self.inspector_control)
if left_layer in ee_basemaps.keys():
left_layer = ee_basemaps[left_layer]
if right_layer in ee_basemaps.keys():
right_layer = ee_basemaps[right_layer]
control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer)
self.add_control(control)
except Exception as e:
print(e)
print('The provided layers are invalid!')
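    # Example (sketch): comparing two basemaps side by side. Both names must
    # be keys of ee_basemaps.
    #
    #   m.split_map(left_layer='HYBRID', right_layer='ESRI')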
def ts_inspector(self, left_ts, right_ts, left_names, right_names, left_vis={}, right_vis={}):
"""Creates a split-panel map for inspecting timeseries images.
Args:
left_ts (object): An ee.ImageCollection to show on the left panel.
right_ts (object): An ee.ImageCollection to show on the right panel.
left_names (list): A list of names to show under the left dropdown.
right_names (list): A list of names to show under the right dropdown.
left_vis (dict, optional): Visualization parameters for the left layer. Defaults to {}.
right_vis (dict, optional): Visualization parameters for the right layer. Defaults to {}.
"""
left_count = int(left_ts.size().getInfo())
right_count = int(right_ts.size().getInfo())
if left_count != len(left_names):
print(
'The number of images in left_ts must match the number of layer names in left_names.')
return
if right_count != len(right_names):
print(
'The number of images in right_ts must match the number of layer names in right_names.')
return
left_layer = TileLayer(
url='https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',
attribution='Google',
name='Google Maps'
)
right_layer = TileLayer(
url='https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',
attribution='Google',
name='Google Maps'
)
self.clear_controls()
left_dropdown = widgets.Dropdown(options=left_names, value=None)
right_dropdown = widgets.Dropdown(options=right_names, value=None)
left_dropdown.layout.max_width = '130px'
right_dropdown.layout.max_width = '130px'
left_control = WidgetControl(widget=left_dropdown, position='topleft')
right_control = WidgetControl(
widget=right_dropdown, position='topright')
self.add_control(control=left_control)
self.add_control(control=right_control)
self.add_control(ZoomControl(position='topleft'))
self.add_control(ScaleControl(position='bottomleft'))
self.add_control(FullScreenControl())
def left_dropdown_change(change):
left_dropdown_index = left_dropdown.index
if left_dropdown_index is not None and left_dropdown_index >= 0:
try:
if isinstance(left_ts, ee.ImageCollection):
left_image = left_ts.toList(
left_ts.size()).get(left_dropdown_index)
elif isinstance(left_ts, ee.List):
left_image = left_ts.get(left_dropdown_index)
else:
print('The left_ts argument must be an ee.ImageCollection or ee.List.')
return
if isinstance(left_image, ee.ImageCollection):
left_image = ee.Image(left_image.mosaic())
elif isinstance(left_image, ee.Image):
pass
else:
left_image = ee.Image(left_image)
left_image = ee_tile_layer(
left_image, left_vis, left_names[left_dropdown_index])
left_layer.url = left_image.url
except Exception as e:
print(e)
return
left_dropdown.observe(left_dropdown_change, names='value')
def right_dropdown_change(change):
right_dropdown_index = right_dropdown.index
if right_dropdown_index is not None and right_dropdown_index >= 0:
try:
if isinstance(right_ts, ee.ImageCollection):
right_image = right_ts.toList(
right_ts.size()).get(right_dropdown_index)
elif isinstance(right_ts, ee.List):
right_image = right_ts.get(right_dropdown_index)
else:
print('The right_ts argument must be an ee.ImageCollection or ee.List.')
return
if isinstance(right_image, ee.ImageCollection):
right_image = ee.Image(right_image.mosaic())
elif isinstance(right_image, ee.Image):
pass
else:
right_image = ee.Image(right_image)
right_image = ee_tile_layer(
right_image, right_vis, right_names[right_dropdown_index])
right_layer.url = right_image.url
except Exception as e:
print(e)
return
right_dropdown.observe(right_dropdown_change, names='value')
try:
split_control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer)
self.add_control(split_control)
except Exception as e:
print(e)
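# Illustrative sketch (left_collection/right_collection are hypothetical; the
# image counts of left_ts/right_ts must equal the lengths of the name lists):
#   m.ts_inspector(left_ts=left_collection, right_ts=right_collection,
#                  left_names=['1990', '2000'], right_names=['2010', '2020'],
#                  left_vis={'min': 0, 'max': 3000}, right_vis={'min': 0, 'max': 3000})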
def basemap_demo(self):
"""A demo for using geemap basemaps.
"""
dropdown = widgets.Dropdown(
options=list(ee_basemaps.keys()),
value='HYBRID',
description='Basemaps'
)
def on_click(change):
basemap_name = change['new']
old_basemap = self.layers[-1]
self.substitute_layer(old_basemap, ee_basemaps[basemap_name])
dropdown.observe(on_click, 'value')
basemap_control = WidgetControl(widget=dropdown, position='topright')
self.remove_control(self.inspector_control)
# self.remove_control(self.layer_control)
self.add_control(basemap_control)
def add_legend(self, legend_title='Legend', legend_dict=None, legend_keys=None, legend_colors=None, position='bottomright', builtin_legend=None, **kwargs):
"""Adds a customized basemap to the map.
Args:
legend_title (str, optional): Title of the legend. Defaults to 'Legend'.
legend_dict (dict, optional): A dictionary containing legend items as keys and color as values. If provided, legend_keys and legend_colors will be ignored. Defaults to None.
legend_keys (list, optional): A list of legend keys. Defaults to None.
legend_colors (list, optional): A list of legend colors. Defaults to None.
position (str, optional): Position of the legend. Defaults to 'bottomright'.
builtin_legend (str, optional): Name of the builtin legend to add to the map. Defaults to None.
"""
import pkg_resources
from IPython.display import display
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
legend_template = os.path.join(pkg_dir, 'data/template/legend.html')
# print(kwargs['min_height'])
if 'min_width' not in kwargs.keys():
min_width = None
else:
min_width = kwargs['min_width']
if 'max_width' not in kwargs.keys():
max_width = None
else:
max_width = kwargs['max_width']
if 'min_height' not in kwargs.keys():
min_height = None
else:
min_height = kwargs['min_height']
if 'max_height' not in kwargs.keys():
max_height = None
else:
max_height = kwargs['max_height']
if 'height' not in kwargs.keys():
height = None
else:
height = kwargs['height']
if 'width' not in kwargs.keys():
width = None
else:
width = kwargs['width']
if width is None:
max_width = '300px'
if height is None:
max_height = '400px'
if not os.path.exists(legend_template):
print('The legend template does not exist.')
return
if legend_keys is not None:
if not isinstance(legend_keys, list):
print('The legend keys must be a list.')
return
else:
legend_keys = ['One', 'Two', 'Three', 'Four', 'etc']
if legend_colors is not None:
if not isinstance(legend_colors, list):
print('The legend colors must be a list.')
return
elif all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
elif all((item.startswith('#') and len(item) == 7) for item in legend_colors):
pass
elif all((len(item) == 6) for item in legend_colors):
pass
else:
print('The legend colors must be a list of tuples.')
return
else:
legend_colors = ['#8DD3C7', '#FFFFB3',
'#BEBADA', '#FB8072', '#80B1D3']
if len(legend_keys) != len(legend_colors):
print('The legend keys and colors must be the same length.')
return
allowed_builtin_legends = builtin_legends.keys()
if builtin_legend is not None:
# builtin_legend = builtin_legend.upper()
if builtin_legend not in allowed_builtin_legends:
print('The builtin legend must be one of the following: {}'.format(
', '.join(allowed_builtin_legends)))
return
else:
legend_dict = builtin_legends[builtin_legend]
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if legend_dict is not None:
if not isinstance(legend_dict, dict):
print('The legend dict must be a dictionary.')
return
else:
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
allowed_positions = ['topleft', 'topright',
'bottomleft', 'bottomright']
if position not in allowed_positions:
print('The position must be one of the following: {}'.format(
', '.join(allowed_positions)))
return
header = []
content = []
footer = []
with open(legend_template) as f:
lines = f.readlines()
lines[3] = lines[3].replace('Legend', legend_title)
header = lines[:6]
footer = lines[11:]
for index, key in enumerate(legend_keys):
color = legend_colors[index]
if not color.startswith('#'):
color = '#' + color
item = " <li><span style='background:{};'></span>{}</li>\n".format(
color, key)
content.append(item)
legend_html = header + content + footer
legend_text = ''.join(legend_html)
try:
if self.legend_control is not None:
legend_widget = self.legend_widget
legend_widget.close()
self.remove_control(self.legend_control)
legend_output_widget = widgets.Output(
layout={'border': '1px solid black', 'max_width': max_width, 'min_width': min_width, 'max_height': max_height,
'min_height': min_height, 'height': height, 'width': width, 'overflow': 'scroll'})
legend_control = WidgetControl(
widget=legend_output_widget, position=position)
legend_widget = widgets.HTML(value=legend_text)
with legend_output_widget:
display(legend_widget)
self.legend_widget = legend_output_widget
self.legend_control = legend_control
self.add_control(legend_control)
except Exception as e:
print(e)
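# Illustrative usage (a sketch; 'NLCD' is assumed to be one of the names in
# builtin_legends):
#   m.add_legend(legend_title='NLCD Land Cover', builtin_legend='NLCD')
#   m.add_legend(legend_keys=['Water', 'Forest'],
#                legend_colors=['#0000FF', '#00FF00'], position='bottomright')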
def image_overlay(self, url, bounds, name):
"""Overlays an image from the Internet or locally on the map.
Args:
url (str): http URL or local file path to the image.
bounds (tuple): bounding box of the image in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).
name (str): name of the layer to show on the layer control.
"""
from base64 import b64encode
from PIL import Image, ImageSequence
from io import BytesIO
try:
if not url.startswith('http'):
if not os.path.exists(url):
print('The provided file does not exist.')
return
ext = os.path.splitext(url)[1][1:] # file extension
image = Image.open(url)
f = BytesIO()
if ext.lower() == 'gif':
frames = []
# Loop over each frame in the animated image
for frame in ImageSequence.Iterator(image):
frame = frame.convert('RGBA')
b = BytesIO()
frame.save(b, format="gif")
frame = Image.open(b)
frames.append(frame)
frames[0].save(f, format='GIF', save_all=True,
append_images=frames[1:], loop=0)
else:
image.save(f, ext)
data = b64encode(f.getvalue())
data = data.decode('ascii')
url = 'data:image/{};base64,'.format(ext) + data
img = ipyleaflet.ImageOverlay(url=url, bounds=bounds, name=name)
self.add_layer(img)
except Exception as e:
print(e)
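# Illustrative usage ('logo.png' is a placeholder path; bounds follow the
# (lower_left(lat, lon), upper_right(lat, lon)) convention described above):
#   bounds = ((13, -130), (32, -100))
#   m.image_overlay('logo.png', bounds, 'Image overlay')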
def video_overlay(self, url, bounds, name):
"""Overlays a video from the Internet on the map.
Args:
url (str): http URL of the video, such as "https://www.mapbox.com/bites/00188/patricia_nasa.webm"
bounds (tuple): bounding box of the video in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).
name (str): name of the layer to show on the layer control.
"""
try:
video = ipyleaflet.VideoOverlay(url=url, bounds=bounds, name=name)
self.add_layer(video)
except Exception as e:
print(e)
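# Illustrative usage, reusing the sample URL from the docstring:
#   bounds = ((13, -130), (32, -100))
#   m.video_overlay('https://www.mapbox.com/bites/00188/patricia_nasa.webm',
#                   bounds, 'Hurricane Patricia')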
def add_landsat_ts_gif(self, layer_name='Timelapse', roi=None, label=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20', bands=['NIR', 'Red', 'Green'], vis_params=None, dimensions=768, frames_per_second=10, font_size=30, font_color='white', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, out_gif=None, download=False, apply_fmask=True, nd_bands=None, nd_threshold=0, nd_palette=['black', 'blue']):
"""Adds a Landsat timelapse to the map.
Args:
layer_name (str, optional): Layer name to show under the layer control. Defaults to 'Timelapse'.
roi (object, optional): Region of interest to create the timelapse. Defaults to None.
label (str, optional): A label to shown on the GIF, such as place name. Defaults to None.
start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].
vis_params (dict, optional): Visualization parameters. Defaults to None.
dimensions (int, optional): A number or a pair of numbers in the format WIDTHxHEIGHT. Maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.
frames_per_second (int, optional): Animation speed. Defaults to 10.
font_size (int, optional): Font size of the animated text and label. Defaults to 30.
font_color (str, optional): Font color of the animated text and label. Defaults to 'white'.
add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.
progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.
progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.
out_gif (str, optional): File path to the output animated GIF. Defaults to None.
download (bool, optional): Whether to download the gif. Defaults to False.
apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated cloud, cloud shadow, snow, and water masking. Defaults to True.
nd_bands (list, optional): A list of names specifying the bands to use, e.g., ['Green', 'SWIR1']. The normalized difference is computed as (first - second) / (first + second). Note that negative input values are forced to 0 so that the result is confined to the range (-1, 1). Defaults to None.
nd_threshold (float, optional): The threshold for extracting pixels from the normalized difference band. Defaults to 0.
nd_palette (list, optional): The color palette to use for displaying the normalized difference band. Defaults to ['black', 'blue'].
"""
try:
if roi is None:
if self.draw_last_feature is not None:
feature = self.draw_last_feature
roi = feature.geometry()
else:
roi = ee.Geometry.Polygon(
[[[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718]]], None, False)
elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):
roi = roi.geometry()
elif isinstance(roi, ee.Geometry):
pass
else:
print('The provided roi is invalid. It must be an ee.Geometry')
return
geojson = ee_to_geojson(roi)
bounds = minimum_bounding_box(geojson)
geojson = adjust_longitude(geojson)
roi = ee.Geometry(geojson)
in_gif = landsat_ts_gif(roi=roi, out_gif=out_gif, start_year=start_year, end_year=end_year, start_date=start_date,
end_date=end_date, bands=bands, vis_params=vis_params, dimensions=dimensions, frames_per_second=frames_per_second, apply_fmask=apply_fmask, nd_bands=nd_bands, nd_threshold=nd_threshold, nd_palette=nd_palette)
in_nd_gif = in_gif.replace('.gif', '_nd.gif')
print('Adding animated text to GIF ...')
add_text_to_gif(in_gif, in_gif, xy=('2%', '2%'), text_sequence=start_year,
font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if nd_bands is not None:
add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '2%'), text_sequence=start_year,
font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if label is not None:
add_text_to_gif(in_gif, in_gif, xy=('2%', '90%'), text_sequence=label,
font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
# if nd_bands is not None:
# add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '90%'), text_sequence=label,
# font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if is_tool('ffmpeg'):
reduce_gif_size(in_gif)
if nd_bands is not None:
reduce_gif_size(in_nd_gif)
print('Adding GIF to the map ...')
self.image_overlay(url=in_gif, bounds=bounds, name=layer_name)
if nd_bands is not None:
self.image_overlay(
url=in_nd_gif, bounds=bounds, name=layer_name+' ND')
print('The timelapse has been added to the map.')
if download:
link = create_download_link(
in_gif, title="Click here to download the timelapse: ")
display(link)
except Exception as e:
print(e)
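# Illustrative sketch (with roi=None this falls back to the last drawn
# feature, or to the default Las Vegas extent hard-coded above):
#   m.add_landsat_ts_gif(label='Las Vegas, NV', start_year=1985, end_year=2019,
#                        bands=['SWIR1', 'NIR', 'Red'], frames_per_second=5)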
def to_html(self, outfile, title='My Map', width='100%', height='880px'):
"""Saves the map as a HTML file.
Args:
outfile (str): The output file path to the HTML file.
title (str, optional): The title of the HTML file. Defaults to 'My Map'.
width (str, optional): The width of the map in pixels or percentage. Defaults to '100%'.
height (str, optional): The height of the map in pixels. Defaults to '880px'.
"""
try:
if not outfile.endswith('.html'):
print('The output file must end with .html')
return
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
before_width = self.layout.width
before_height = self.layout.height
if not isinstance(width, str):
print("width must be a string.")
return
elif width.endswith('px') or width.endswith('%'):
pass
else:
print('width must end with px or %')
return
if not isinstance(height, str):
print("height must be a string.")
return
elif not height.endswith('px'):
print('height must end with px')
return
self.layout.width = width
self.layout.height = height
self.save(outfile, title=title)
self.layout.width = before_width
self.layout.height = before_height
except Exception as e:
print(e)
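# Illustrative usage (note that width accepts px or %, while height only accepts px):
#   m.to_html('my_map.html', title='Demo Map', width='100%', height='880px')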
def to_image(self, outfile=None, monitor=1):
"""Saves the map as a PNG or JPG image.
Args:
outfile (str, optional): The output file path to the image. Defaults to None.
monitor (int, optional): The monitor to take the screenshot. Defaults to 1.
"""
if outfile is None:
outfile = os.path.join(os.getcwd(), 'my_map.png')
if outfile.endswith('.png') or outfile.endswith('.jpg'):
pass
else:
print('The output file must be a PNG or JPG image.')
return
work_dir = os.path.dirname(outfile)
if not os.path.exists(work_dir):
os.makedirs(work_dir)
screenshot = screen_capture(outfile, monitor)
self.screenshot = screenshot
def toolbar_reset(self):
"""Reset the toolbar so that no tool is selected.
"""
toolbar_grid = self.toolbar
for tool in toolbar_grid.children:
tool.value = False
def add_raster(self, image, bands=None, layer_name=None, colormap=None, x_dim='x', y_dim='y'):
"""Adds a local raster dataset to the map.
Args:
image (str): The image file path.
bands (int or list, optional): The image bands to use. It can be either a number (e.g., 1) or a list (e.g., [3, 2, 1]). Defaults to None.
layer_name (str, optional): The layer name to use for the raster. Defaults to None.
colormap (str, optional): The name of the colormap to use for the raster, such as 'gray' and 'terrain'. More can be found at https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html. Defaults to None.
x_dim (str, optional): The x dimension. Defaults to 'x'.
y_dim (str, optional): The y dimension. Defaults to 'y'.
"""
try:
import xarray_leaflet
except:
# import platform
# if platform.system() != "Windows":
# # install_from_github(
# # url='https://github.com/davidbrochart/xarray_leaflet')
# check_install('xarray_leaflet')
# import xarray_leaflet
# else:
print(
'You need to install xarray_leaflet first. See https://github.com/davidbrochart/xarray_leaflet')
print(
'Try the following to install xarray_leaflet: \n\nconda install -c conda-forge xarray_leaflet')
return
import warnings
import numpy as np
import rioxarray
import xarray as xr
import matplotlib.pyplot as plt
warnings.simplefilter('ignore')
if not os.path.exists(image):
print('The image file does not exist.')
return
if colormap is None:
colormap = plt.cm.inferno
if layer_name is None:
layer_name = 'Layer_' + random_string()
if isinstance(colormap, str):
colormap = plt.cm.get_cmap(name=colormap)
da = rioxarray.open_rasterio(image, masked=True)
# print(da.rio.nodata)
multi_band = False
if len(da.band) > 1:
multi_band = True
if bands is None:
bands = [3, 2, 1]
else:
bands = 1
if multi_band:
da = da.rio.write_nodata(0)
else:
da = da.rio.write_nodata(np.nan)
da = da.sel(band=bands)
# crs = da.rio.crs
# nan = da.attrs['nodatavals'][0]
# da = da / da.max()
# # if multi_band:
# da = xr.where(da == nan, np.nan, da)
# da = da.rio.write_nodata(0)
# da = da.rio.write_crs(crs)
if multi_band:
layer = da.leaflet.plot(
self, x_dim=x_dim, y_dim=y_dim, rgb_dim='band')
else:
layer = da.leaflet.plot(
self, x_dim=x_dim, y_dim=y_dim, colormap=colormap)
layer.name = layer_name
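# Illustrative usage (a sketch; requires the optional xarray_leaflet
# dependency, and 'dem.tif'/'landsat.tif' are placeholder paths):
#   m.add_raster('dem.tif', colormap='terrain', layer_name='DEM')
#   m.add_raster('landsat.tif', bands=[4, 3, 2], layer_name='False color')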
def remove_drawn_features(self):
"""Removes user-drawn geometries from the map
"""
if self.draw_layer is not None:
self.remove_layer(self.draw_layer)
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.chart_values = []
self.chart_points = []
self.chart_labels = None
def extract_values_to_points(self, filename):
"""Exports pixel values to a csv file based on user-drawn geometries.
Args:
filename (str): The output file path to the csv file or shapefile.
"""
import csv
filename = os.path.abspath(filename)
allowed_formats = ['csv', 'shp']
ext = filename[-3:]
if ext not in allowed_formats:
print('The output file must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
out_dir = os.path.dirname(filename)
out_csv = filename[:-3] + 'csv'
out_shp = filename[:-3] + 'shp'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
count = len(self.chart_points)
out_list = []
if count > 0:
header = ['id', 'longitude', 'latitude'] + self.chart_labels
out_list.append(header)
for i in range(0, count):
id = i + 1
line = [id] + self.chart_points[i] + self.chart_values[i]
out_list.append(line)
with open(out_csv, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(out_list)
if ext == 'csv':
print('The csv file has been saved to: {}'.format(out_csv))
else:
csv_to_shp(out_csv, out_shp)
print('The shapefile has been saved to: {}'.format(out_shp))
# The functions below are outside the Map class.
def screen_capture(outfile, monitor=1):
"""Takes a full screenshot of the selected monitor.
Args:
outfile (str): The output file path to the screenshot.
monitor (int, optional): The monitor to take the screenshot. Defaults to 1.
"""
from mss import mss
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if not isinstance(monitor, int):
print('The monitor number must be an integer.')
return
try:
with mss() as sct:
sct.shot(output=outfile, mon=monitor)
return outfile
except Exception as e:
print(e)
def install_from_github(url):
"""Install a package from a GitHub repository.
Args:
url (str): The URL of the GitHub repository.
"""
try:
download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
if not os.path.exists(download_dir):
os.makedirs(download_dir)
repo_name = os.path.basename(url)
zip_url = os.path.join(url, 'archive/master.zip')
filename = repo_name + '-master.zip'
download_from_url(url=zip_url, out_file_name=filename,
out_dir=download_dir, unzip=True)
pkg_dir = os.path.join(download_dir, repo_name + '-master')
pkg_name = os.path.basename(url)
work_dir = os.getcwd()
os.chdir(pkg_dir)
print('Installing {}...'.format(pkg_name))
cmd = 'pip install .'
os.system(cmd)
os.chdir(work_dir)
print('{} has been installed successfully.'.format(pkg_name))
# print("\nPlease comment out 'install_from_github()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output")
except Exception as e:
print(e)
def rgb_to_hex(rgb=(255, 255, 255)):
"""Converts RGB to hex color. In RGB color R stands for Red, G stands for Green, and B stands for Blue, and it ranges from the decimal value of 0 – 255.
Args:
rgb (tuple, optional): RGB color code as a tuple of (red, green, blue). Defaults to (255, 255, 255).
Returns:
str: hex color code
"""
return '%02x%02x%02x' % rgb
def hex_to_rgb(value='FFFFFF'):
"""Converts hex color to RGB color.
Args:
value (str, optional): Hex color code as a string. Defaults to 'FFFFFF'.
Returns:
tuple: RGB color as a tuple.
"""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))
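# Round-trip sketch for the two color helpers above:
#   rgb_to_hex((255, 255, 0))  # -> 'ffff00'
#   hex_to_rgb('ffff00')       # -> (255, 255, 0)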
def check_color(in_color):
"""Checks the input color and returns the corresponding hex color code.
Args:
in_color (str or tuple): It can be a string (e.g., 'red', '#ffff00') or tuple (e.g., (255, 127, 0)).
Returns:
str: A hex color code.
"""
out_color = '#000000' # default black color
if isinstance(in_color, tuple) and len(in_color) == 3:
if all(isinstance(item, int) for item in in_color):
rescaled_color = [x / 255.0 for x in in_color]
out_color = colour.Color(rgb=tuple(rescaled_color))
return out_color.hex_l
else:
print(
'RGB color must be a tuple with three integer values ranging from 0 to 255.')
return
else:
try:
out_color = colour.Color(in_color)
return out_color.hex_l
except Exception as e:
print('The provided color is invalid. Using the default black color.')
print(e)
return out_color
def system_fonts(show_full_path=False):
"""Gets a list of system fonts.
# Common font locations:
# Linux: /usr/share/fonts/TTF/
# Windows: C:\Windows\Fonts
# macOS: System > Library > Fonts
Args:
show_full_path (bool, optional): Whether to show the full path of each system font. Defaults to False.
Returns:
list: A list of system fonts.
"""
try:
import matplotlib.font_manager
font_list = matplotlib.font_manager.findSystemFonts(
fontpaths=None, fontext='ttf')
font_list.sort()
font_names = [os.path.basename(f) for f in font_list]
font_names.sort()
if show_full_path:
return font_list
else:
return font_names
except Exception as e:
print(e)
def add_text_to_gif(in_gif, out_gif, xy=None, text_sequence=None, font_type="arial.ttf", font_size=20, font_color='#000000', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, duration=100, loop=0):
"""Adds animated text to a GIF image.
Args:
in_gif (str): The file path to the input GIF image.
out_gif (str): The file path to the output GIF image.
xy (tuple, optional): Top left corner of the text. It can be formatted like this: (10, 10) or ('15%', '25%'). Defaults to None.
text_sequence (int, str, list, optional): Text to be drawn. It can be an integer number, a string, or a list of strings. Defaults to None.
font_type (str, optional): Font type. Defaults to "arial.ttf".
font_size (int, optional): Font size. Defaults to 20.
font_color (str, optional): Font color. It can be a string (e.g., 'red'), rgb tuple (e.g., (255, 127, 0)), or hex code (e.g., '#ff00ff'). Defaults to '#000000'.
add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.
progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.
progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.
duration (int, optional): Controls how long each frame is displayed, in milliseconds. It is the inverse of the frame rate: setting it to 100 milliseconds gives 10 frames per second. You can decrease the duration for a smoother animation. Defaults to 100.
loop (int, optional): Controls how many times the animation repeats. A value of 1 means the animation plays once and then stops (displaying the last frame); a value of 0, the default, means the animation repeats forever. Defaults to 0.
"""
import io
import pkg_resources
import warnings
from PIL import Image, ImageDraw, ImageSequence, ImageFont
warnings.simplefilter('ignore')
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')
in_gif = os.path.abspath(in_gif)
out_gif = os.path.abspath(out_gif)
if not os.path.exists(in_gif):
print('The input gif file does not exist.')
return
if not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
if font_type == 'arial.ttf':
font = ImageFont.truetype(default_font, font_size)
else:
try:
font_list = system_fonts(show_full_path=True)
font_names = [os.path.basename(f) for f in font_list]
if (font_type in font_list) or (font_type in font_names):
font = ImageFont.truetype(font_type, font_size)
else:
print(
'The specified font type could not be found on your system. Using the default font instead.')
font = ImageFont.truetype(default_font, font_size)
except Exception as e:
print(e)
font = ImageFont.truetype(default_font, font_size)
color = check_color(font_color)
progress_bar_color = check_color(progress_bar_color)
try:
image = Image.open(in_gif)
except Exception as e:
print('An error occurred while opening the gif.')
print(e)
return
count = image.n_frames
W, H = image.size
progress_bar_widths = [i * 1.0 / count * W for i in range(1, count + 1)]
progress_bar_shapes = [[(0, H - progress_bar_height), (x, H)]
for x in progress_bar_widths]
if xy is None:
# default text location is 5% width and 5% height of the image.
xy = (int(0.05 * W), int(0.05 * H))
elif not isinstance(xy, tuple) or len(xy) != 2:
print("xy must be a tuple, e.g., (10, 10), ('10%', '10%')")
return
elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):
x, y = xy
if (x > 0) and (x < W) and (y > 0) and (y < H):
pass
else:
print(
'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))
return
elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):
x, y = xy
if ('%' in x) and ('%' in y):
try:
x = int(float(x.replace('%', '')) / 100.0 * W)
y = int(float(y.replace('%', '')) / 100.0 * H)
xy = (x, y)
except Exception as e:
print(
"The specified xy is invalid. It must be formatted like this ('10%', '10%')")
return
else:
print("The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')")
return
if text_sequence is None:
text = [str(x) for x in range(1, count + 1)]
elif isinstance(text_sequence, int):
text = [str(x) for x in range(
text_sequence, text_sequence + count)]
elif isinstance(text_sequence, str):
try:
text_sequence = int(text_sequence)
text = [str(x) for x in range(
text_sequence, text_sequence + count)]
except Exception as e:
text = [text_sequence] * count
elif isinstance(text_sequence, list) and len(text_sequence) != count:
print('The length of the text sequence must be equal to the number ({}) of frames in the gif.'.format(count))
return
else:
text = [str(x) for x in text_sequence]
try:
frames = []
# Loop over each frame in the animated image
for index, frame in enumerate(ImageSequence.Iterator(image)):
# Draw the text on the frame
frame = frame.convert('RGB')
draw = ImageDraw.Draw(frame)
# w, h = draw.textsize(text[index])
draw.text(xy, text[index], font=font, fill=color)
if add_progress_bar:
draw.rectangle(
progress_bar_shapes[index], fill=progress_bar_color)
del draw
b = io.BytesIO()
frame.save(b, format="GIF")
frame = Image.open(b)
frames.append(frame)
# https://www.pythoninformer.com/python-libraries/pillow/creating-animated-gif/
# Save the frames as a new image
frames[0].save(out_gif, save_all=True,
append_images=frames[1:], duration=duration, loop=loop, optimize=True)
except Exception as e:
print(e)
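# Illustrative usage ('in.gif'/'out.gif' are placeholder paths; an int
# text_sequence is rendered as an incrementing counter, one value per frame):
#   add_text_to_gif('in.gif', 'out.gif', xy=('5%', '5%'), text_sequence=1984,
#                   font_size=30, font_color='#ffffff', duration=100)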
def open_image_from_url(url):
"""Loads an image from the specified URL.
Args:
url (str): URL of the image.
Returns:
object: Image object.
"""
from PIL import Image
import requests
from io import BytesIO
from urllib.parse import urlparse
try:
# if url.endswith('.gif'):
# out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# a = urlparse(url)
# out_name = os.path.basename(a.path)
# out_path = os.path.join(out_dir, out_name)
# download_from_url(url, out_name, out_dir, unzip=False)
# img = Image.open(out_path)
# else:
response = requests.get(url)
img = Image.open(BytesIO(response.content))
return img
except Exception as e:
print(e)
def has_transparency(img):
"""Checks whether an image has transparency.
Args:
img (object): a PIL Image object.
Returns:
bool: True if it has transparency, False otherwise.
"""
if img.mode == "P":
transparent = img.info.get("transparency", -1)
for _, index in img.getcolors():
if index == transparent:
return True
elif img.mode == "RGBA":
extrema = img.getextrema()
if extrema[3][0] < 255:
return True
return False
def add_image_to_gif(in_gif, out_gif, in_image, xy=None, image_size=(80, 80), circle_mask=False):
"""Adds an image logo to a GIF image.
Args:
in_gif (str): Input file path to the GIF image.
out_gif (str): Output file path to the GIF image.
in_image (str): Input file path to the image.
xy (tuple, optional): Top left corner of the text. It can be formatted like this: (10, 10) or ('15%', '25%'). Defaults to None.
image_size (tuple, optional): Resize image. Defaults to (80, 80).
circle_mask (bool, optional): Whether to apply a circle mask to the image. This only works with non-png images. Defaults to False.
"""
import io
import warnings
from PIL import Image, ImageDraw, ImageSequence, ImageFilter
warnings.simplefilter('ignore')
in_gif = os.path.abspath(in_gif)
is_url = False
if in_image.startswith('http'):
is_url = True
if not os.path.exists(in_gif):
print('The input gif file does not exist.')
return
if (not is_url) and (not os.path.exists(in_image)):
print('The provided logo file does not exist.')
return
if not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
try:
image = Image.open(in_gif)
except Exception as e:
print('An error occurred while opening the image.')
print(e)
return
try:
if in_image.startswith('http'):
logo_raw_image = open_image_from_url(in_image)
else:
in_image = os.path.abspath(in_image)
logo_raw_image = Image.open(in_image)
except Exception as e:
print(e)
logo_raw_size = logo_raw_image.size
image_size = min(logo_raw_size[0], image_size[0]), min(
logo_raw_size[1], image_size[1])
logo_image = logo_raw_image.convert('RGBA')
logo_image.thumbnail(image_size, Image.ANTIALIAS)
W, H = image.size
mask_im = None
if circle_mask:
mask_im = Image.new("L", image_size, 0)
draw = ImageDraw.Draw(mask_im)
draw.ellipse((0, 0, image_size[0], image_size[1]), fill=255)
if has_transparency(logo_raw_image):
mask_im = logo_image.copy()
if xy is None:
# default logo location is 5% width and 5% height of the image.
xy = (int(0.05 * W), int(0.05 * H))
elif not isinstance(xy, tuple) or len(xy) != 2:
print("xy must be a tuple, e.g., (10, 10), ('10%', '10%')")
return
elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):
x, y = xy
if (x > 0) and (x < W) and (y > 0) and (y < H):
pass
else:
print(
'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))
return
elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):
x, y = xy
if ('%' in x) and ('%' in y):
try:
x = int(float(x.replace('%', '')) / 100.0 * W)
y = int(float(y.replace('%', '')) / 100.0 * H)
xy = (x, y)
except Exception as e:
print(
"The specified xy is invalid. It must be formatted like this ('10%', '10%')")
return
else:
print("The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')")
return
try:
frames = []
for index, frame in enumerate(ImageSequence.Iterator(image)):
frame = frame.convert('RGBA')
frame.paste(logo_image, xy, mask_im)
b = io.BytesIO()
frame.save(b, format="GIF")
frame = Image.open(b)
frames.append(frame)
frames[0].save(out_gif, save_all=True, append_images=frames[1:])
except Exception as e:
print(e)
def show_image(img_path, width=None, height=None):
"""Shows an image within Jupyter notebook.
Args:
img_path (str): The image file path.
width (int, optional): Width of the image in pixels. Defaults to None.
height (int, optional): Height of the image in pixels. Defaults to None.
"""
from IPython.display import display
try:
out = widgets.Output()
# layout={'border': '1px solid black'})
# layout={'border': '1px solid black', 'width': str(width + 20) + 'px', 'height': str(height + 10) + 'px'},)
out.clear_output(wait=True)
display(out)
with out:
file = open(img_path, "rb")
image = file.read()
if (width is None) and (height is None):
display(widgets.Image(value=image))
elif (width is not None) and (height is not None):
display(widgets.Image(value=image, width=width, height=height))
else:
print('You need to set both width and height.')
return
except Exception as e:
print(e)
def legend_from_ee(ee_class_table):
"""Extract legend from an Earth Engine class table on the Earth Engine Data Catalog page
such as https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
Value Color Description
0 1c0dff Water
1 05450a Evergreen needleleaf forest
2 086a10 Evergreen broadleaf forest
3 54a708 Deciduous needleleaf forest
4 78d203 Deciduous broadleaf forest
5 009900 Mixed forest
6 c6b044 Closed shrublands
7 dcd159 Open shrublands
8 dade48 Woody savannas
9 fbff13 Savannas
10 b6ff05 Grasslands
11 27ff87 Permanent wetlands
12 c24f44 Croplands
13 a5a5a5 Urban and built-up
14 ff6d4c Cropland/natural vegetation mosaic
15 69fff8 Snow and ice
16 f9ffa4 Barren or sparsely vegetated
254 ffffff Unclassified
Args:
ee_class_table (str): An Earth Engine class table with triple quotes.
Returns:
dict: Returns a legend dictionary that can be used to create a legend.
"""
try:
ee_class_table = ee_class_table.strip()
lines = ee_class_table.split('\n')[1:]
if lines[0] == 'Value\tColor\tDescription':
lines = lines[1:]
legend_dict = {}
for index, line in enumerate(lines):
items = line.split("\t")
items = [item.strip() for item in items]
color = items[1]
key = items[0] + " " + items[2]
legend_dict[key] = color
return legend_dict
except Exception as e:
print(e)
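# Illustrative usage (paste a class table from the Data Catalog page as a
# triple-quoted string, then feed the resulting dict to add_legend):
#   legend_dict = legend_from_ee(ee_class_table)
#   m.add_legend(legend_title='MODIS Land Cover', legend_dict=legend_dict)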
def ee_tile_layer(ee_object, vis_params={}, name='Layer untitled', shown=True, opacity=1.0):
"""Converts and Earth Engine layer to ipyleaflet TileLayer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer untitled'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
# ee_initialize()
image = None
if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):
err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.ImageCollection, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
features = ee.FeatureCollection(ee_object)
width = 2
if 'width' in vis_params:
width = vis_params['width']
color = '000000'
if 'color' in vis_params:
color = vis_params['color']
image_fill = features.style(
**{'fillColor': color}).updateMask(ee.Image.constant(0.5))
image_outline = features.style(
**{'color': color, 'fillColor': '00000000', 'width': width})
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = ipyleaflet.TileLayer(
url=map_id_dict['tile_fetcher'].url_format,
attribution='Google Earth Engine',
name=name,
opacity=opacity,
visible=True
# visible=shown
)
return tile_layer
def geojson_to_ee(geo_json, geodesic=True):
"""Converts a geojson to ee.Geometry()
Args:
geo_json (dict): A geojson geometry dictionary or file path.
geodesic (bool, optional): Whether line segments should be interpreted as spherical geodesics. Defaults to True.
Returns:
ee_object: An ee.Geometry object
"""
# ee_initialize()
try:
import json
if not isinstance(geo_json, dict) and os.path.isfile(geo_json):
with open(os.path.abspath(geo_json)) as f:
geo_json = json.load(f)
if geo_json['type'] == 'FeatureCollection':
features = ee.FeatureCollection(geo_json['features'])
return features
elif geo_json['type'] == 'Feature':
geom = None
keys = geo_json['properties']['style'].keys()
if 'radius' in keys: # Checks whether it is a circle
geom = ee.Geometry(geo_json['geometry'])
radius = geo_json['properties']['style']['radius']
geom = geom.buffer(radius)
elif geo_json['geometry']['type'] == 'Point': # Checks whether it is a point
coordinates = geo_json['geometry']['coordinates']
longitude = coordinates[0]
latitude = coordinates[1]
geom = ee.Geometry.Point(longitude, latitude)
else:
geom = ee.Geometry(geo_json['geometry'], "", geodesic)
return geom
else:
print("Could not convert the geojson to ee.Geometry()")
except Exception as e:
print("Could not convert the geojson to ee.Geometry()")
print(e)
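# Illustrative usage ('roi.geojson' is a placeholder path; m is assumed to be
# a Map instance from this module):
#   roi = geojson_to_ee('roi.geojson')
#   m.addLayer(roi, {}, 'ROI')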
def ee_to_geojson(ee_object, out_json=None):
"""Converts Earth Engine object to geojson.
Args:
ee_object (object): An Earth Engine object.
Returns:
object: GeoJSON object.
"""
from json import dumps
# ee_initialize()
try:
if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
json_object = ee_object.getInfo()
if out_json is not None:
out_json = os.path.abspath(out_json)
if not os.path.exists(os.path.dirname(out_json)):
os.makedirs(os.path.dirname(out_json))
geojson = open(out_json, "w")
geojson.write(
dumps({"type": "FeatureCollection", "features": json_object}, indent=2) + "\n")
geojson.close()
return json_object
else:
print("Could not convert the Earth Engine object to geojson")
except Exception as e:
print(e)
def open_github(subdir=None):
"""Opens the GitHub repository for this package.
Args:
subdir (str, optional): Sub-directory of the repository. Defaults to None.
"""
import webbrowser
url = 'https://github.com/giswqs/geemap'
if subdir == 'source':
url += '/tree/master/geemap/'
elif subdir == 'examples':
url += '/tree/master/examples'
elif subdir == 'tutorials':
url += '/tree/master/tutorials'
webbrowser.open_new_tab(url)
def clone_repo(out_dir='.', unzip=True):
"""Clones the geemap GitHub repository.
Args:
out_dir (str, optional): Output folder for the repo. Defaults to '.'.
unzip (bool, optional): Whether to unzip the repository. Defaults to True.
"""
url = 'https://github.com/giswqs/geemap/archive/master.zip'
filename = 'geemap-master.zip'
download_from_url(url, out_file_name=filename,
out_dir=out_dir, unzip=unzip)
def open_youtube():
"""Opens the YouTube tutorials for geemap.
"""
import webbrowser
url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3'
webbrowser.open_new_tab(url)
def api_docs():
"""Open a browser and navigate to the geemap API documentation.
"""
import webbrowser
url = 'https://giswqs.github.io/geemap/geemap'
webbrowser.open_new_tab(url)
def show_youtube(id='h0pz3S6Tvx0'):
"""Displays a YouTube video within Jupyter notebooks.
Args:
id (str, optional): Unique ID of the video. Defaults to 'h0pz3S6Tvx0'.
"""
from IPython.display import YouTubeVideo, display
try:
out = widgets.Output(
layout={'width': '815px'})
# layout={'border': '1px solid black', 'width': '815px'})
out.clear_output(wait=True)
display(out)
with out:
display(YouTubeVideo(id, width=800, height=450))
except Exception as e:
print(e)
def check_install(package):
"""Checks whether a package is installed. If not, it will install the package.
Args:
package (str): The name of the package to check.
"""
import subprocess
try:
__import__(package)
# print('{} is already installed.'.format(package))
except ImportError:
print('{} is not installed. Installing ...'.format(package))
try:
subprocess.check_call(["python", '-m', 'pip', 'install', package])
except Exception as e:
print('Failed to install {}'.format(package))
print(e)
print("{} has been installed successfully.".format(package))
def update_package():
"""Updates the geemap package from the geemap GitHub repository without the need to use pip or conda.
In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.
"""
import shutil
try:
download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
if not os.path.exists(download_dir):
os.makedirs(download_dir)
clone_repo(out_dir=download_dir)
pkg_dir = os.path.join(download_dir, 'geemap-master')
work_dir = os.getcwd()
os.chdir(pkg_dir)
if shutil.which('pip') is None:
cmd = 'pip3 install .'
else:
cmd = 'pip install .'
os.system(cmd)
os.chdir(work_dir)
print("\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output")
except Exception as e:
print(e)
def csv_to_shp(in_csv, out_shp, longitude='longitude', latitude='latitude'):
"""Converts a csv file with latlon info to a point shapefile.
Args:
in_csv (str): The input csv file containing longitude and latitude columns.
out_shp (str): The file path to the output shapefile.
longitude (str, optional): The column name of the longitude column. Defaults to 'longitude'.
latitude (str, optional): The column name of the latitude column. Defaults to 'latitude'.
"""
import csv
import shapefile as shp
if not os.path.exists(in_csv):
print('The provided CSV file does not exist.')
return
if not in_csv.endswith('.csv'):
print('The input file must end with .csv')
return
out_dir = os.path.dirname(out_shp)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
try:
points = shp.Writer(out_shp, shapeType=shp.POINT)
with open(in_csv) as csvfile:
csvreader = csv.DictReader(csvfile)
header = csvreader.fieldnames
[points.field(field) for field in header]
for row in csvreader:
points.point((float(row[longitude])), (float(row[latitude])))
points.record(*tuple([row[f] for f in header]))
out_prj = out_shp.replace('.shp', '.prj')
with open(out_prj, 'w') as f:
prj_str = 'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.0174532925199433]] '
f.write(prj_str)
except Exception as e:
print(e)
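# Illustrative usage ('points.csv' is a placeholder; column names default to
# 'longitude'/'latitude' but can be overridden):
#   csv_to_shp('points.csv', 'points.shp', longitude='lon', latitude='lat')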
def shp_to_geojson(in_shp, out_json=None):
"""Converts a shapefile to GeoJSON.
Args:
in_shp (str): File path of the input shapefile.
out_json (str, optional): File path of the output GeoJSON. Defaults to None.
Returns:
object: The json object representing the shapefile.
"""
# check_install('pyshp')
# ee_initialize()
try:
import json
import shapefile
in_shp = os.path.abspath(in_shp)
if out_json is None:
out_json = os.path.splitext(in_shp)[0] + ".json"
if os.path.exists(out_json):
out_json = out_json.replace('.json', '_bk.json')
elif not os.path.exists(os.path.dirname(out_json)):
os.makedirs(os.path.dirname(out_json))
reader = shapefile.Reader(in_shp)
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
buffer = []
for sr in reader.shapeRecords():
atr = dict(zip(field_names, sr.record))
geom = sr.shape.__geo_interface__
buffer.append(dict(type="Feature", geometry=geom, properties=atr))
from json import dumps
geojson = open(out_json, "w")
geojson.write(dumps({"type": "FeatureCollection",
"features": buffer}, indent=2) + "\n")
geojson.close()
with open(out_json) as f:
json_data = json.load(f)
return json_data
except Exception as e:
print(e)
def shp_to_ee(in_shp):
"""Converts a shapefile to Earth Engine objects.
Args:
in_shp (str): File path to a shapefile.
Returns:
object: Earth Engine objects representing the shapefile.
"""
# ee_initialize()
try:
json_data = shp_to_geojson(in_shp)
ee_object = geojson_to_ee(json_data)
return ee_object
except Exception as e:
print(e)
def filter_polygons(ftr):
"""Converts GeometryCollection to Polygon/MultiPolygon
Args:
ftr (object): ee.Feature
Returns:
object: ee.Feature
"""
# ee_initialize()
geometries = ftr.geometry().geometries()
geometries = geometries.map(lambda geo: ee.Feature(
ee.Geometry(geo)).set('geoType', ee.Geometry(geo).type()))
polygons = ee.FeatureCollection(geometries).filter(
ee.Filter.eq('geoType', 'Polygon')).geometry()
return ee.Feature(polygons).copyProperties(ftr)
def ee_export_vector(ee_object, filename, selectors=None):
"""Exports Earth Engine FeatureCollection to other formats, including shp, csv, json, kml, and kmz.
Args:
ee_object (object): ee.FeatureCollection to export.
filename (str): Output file name.
selectors (list, optional): A list of attributes to export. Defaults to None.
"""
import requests
import zipfile
# ee_initialize()
if not isinstance(ee_object, ee.FeatureCollection):
raise ValueError('ee_object must be an ee.FeatureCollection')
allowed_formats = ['csv', 'geojson', 'kml', 'kmz', 'shp']
# allowed_formats = ['csv', 'kml', 'kmz']
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
if filetype == 'shp':
filename = filename.replace('.shp', '.zip')
if not (filetype.lower() in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
print('Earth Engine no longer supports downloading featureCollection as shapefile or json. \nPlease use geemap.ee_export_vector_to_drive() to export featureCollection to Google Drive.')
raise ValueError
if selectors is None:
selectors = ee_object.first().propertyNames().getInfo()
if filetype == 'csv':
# remove .geo coordinate field
ee_object = ee_object.select([".*"], None, False)
if filetype == 'geojson':
selectors = ['.geo'] + selectors
elif not isinstance(selectors, list):
raise ValueError(
"selectors must be a list, such as ['attribute1', 'attribute2']")
else:
allowed_attributes = ee_object.first().propertyNames().getInfo()
for attribute in selectors:
if not (attribute in allowed_attributes):
raise ValueError('Attributes must be one chosen from: {} '.format(
', '.join(allowed_attributes)))
try:
print('Generating URL ...')
url = ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading. \n Retrying ...')
try:
new_ee_object = ee_object.map(filter_polygons)
print('Generating URL ...')
url = new_ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
except Exception as e:
print(e)
raise ValueError
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
except Exception as e:
print('An error occurred while downloading.')
raise ValueError(e)
try:
if filetype == 'shp':
z = zipfile.ZipFile(filename)
z.extractall(os.path.dirname(filename))
os.remove(filename)
filename = filename.replace('.zip', '.shp')
print('Data downloaded to {}'.format(filename))
except Exception as e:
raise ValueError(e)
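# Illustrative sketch (TIGER/2018/States is a public Earth Engine asset;
# large collections may exceed the direct-download limit, in which case
# ee_export_vector_to_drive below is the safer route):
#   fc = ee.FeatureCollection('TIGER/2018/States')
#   ee_export_vector(fc, filename='states.shp', selectors=['NAME'])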
def ee_export_vector_to_drive(ee_object, description, folder, file_format='shp', selectors=None):
"""Exports Earth Engine FeatureCollection to Google Drive. other formats, including shp, csv, json, kml, and kmz.
Args:
ee_object (object): ee.FeatureCollection to export.
description (str): File name of the output file.
folder (str): Folder name within Google Drive to save the exported file.
file_format (str, optional): Supported file formats include shp, csv, json, kml, kmz, and TFRecord. Defaults to 'shp'.
selectors (list, optional): The list of attributes to export. Defaults to None.
"""
if not isinstance(ee_object, ee.FeatureCollection):
print('The ee_object must be an ee.FeatureCollection.')
return
allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp', 'tfrecord']
if not (file_format.lower() in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
task_config = {
'folder': folder,
'fileFormat': file_format,
}
if selectors is not None:
task_config['selectors'] = selectors
elif (selectors is None) and (file_format.lower() == 'csv'):
# remove .geo coordinate field
ee_object = ee_object.select([".*"], None, False)
print('Exporting {}...'.format(description))
task = ee.batch.Export.table.toDrive(ee_object, description, **task_config)
task.start()
def ee_export_geojson(ee_object, filename=None, selectors=None):
"""Exports Earth Engine FeatureCollection to geojson.
Args:
ee_object (object): ee.FeatureCollection to export.
filename (str, optional): Output file name. Defaults to None.
selectors (list, optional): A list of attributes to export. Defaults to None.
"""
import requests
import zipfile
# ee_initialize()
if not isinstance(ee_object, ee.FeatureCollection):
print('The ee_object must be an ee.FeatureCollection.')
return
if filename is None:
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = os.path.join(out_dir, random_string(6) + '.geojson')
allowed_formats = ['geojson']
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
if not (filetype.lower() in allowed_formats):
print('The output file type must be geojson.')
return
if selectors is None:
selectors = ee_object.first().propertyNames().getInfo()
selectors = ['.geo'] + selectors
elif not isinstance(selectors, list):
print("selectors must be a list, such as ['attribute1', 'attribute2']")
return
else:
allowed_attributes = ee_object.first().propertyNames().getInfo()
for attribute in selectors:
if not (attribute in allowed_attributes):
print('Attributes must be one chosen from: {} '.format(
', '.join(allowed_attributes)))
return
try:
# print('Generating URL ...')
url = ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
# print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading. \n Retrying ...')
try:
new_ee_object = ee_object.map(filter_polygons)
print('Generating URL ...')
url = new_ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
except Exception as e:
print(e)
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
except Exception as e:
print('An error occurred while downloading.')
print(e)
return
with open(filename) as f:
geojson = f.read()
return geojson
def ee_to_shp(ee_object, filename, selectors=None):
"""Downloads an ee.FeatureCollection as a shapefile.
Args:
ee_object (object): ee.FeatureCollection
filename (str): The output filepath of the shapefile.
selectors (list, optional): A list of attributes to export. Defaults to None.
"""
# ee_initialize()
try:
if filename.lower().endswith('.shp'):
ee_export_vector(ee_object=ee_object,
filename=filename, selectors=selectors)
else:
print('The filename must end with .shp')
except Exception as e:
print(e)
def ee_to_csv(ee_object, filename, selectors=None):
"""Downloads an ee.FeatureCollection as a CSV file.
Args:
ee_object (object): ee.FeatureCollection
filename (str): The output filepath of the CSV file.
selectors (list, optional): A list of attributes to export. Defaults to None.
"""
# ee_initialize()
try:
if filename.lower().endswith('.csv'):
ee_export_vector(ee_object=ee_object,
filename=filename, selectors=selectors)
else:
print('The filename must end with .csv')
except Exception as e:
print(e)
def ee_export_image(ee_object, filename, scale=None, crs=None, region=None, file_per_band=False):
"""Exports an ee.Image as a GeoTIFF.
Args:
ee_object (object): The ee.Image to download.
filename (str): Output filename for the exported image.
scale (float, optional): A default scale to use for any bands that do not specify one; ignored if crs and crs_transform are specified. Defaults to None.
crs (str, optional): A default CRS string to use for any bands that do not explicitly specify one. Defaults to None.
region (object, optional): A polygon specifying a region to download; ignored if crs and crs_transform are specified. Defaults to None.
file_per_band (bool, optional): Whether to produce a different GeoTIFF per band. Defaults to False.
"""
import requests
import zipfile
# ee_initialize()
if not isinstance(ee_object, ee.Image):
print('The ee_object must be an ee.Image.')
return
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
filename_zip = filename.replace('.tif', '.zip')
if filetype != 'tif':
print('The filename must end with .tif')
return
try:
print('Generating URL ...')
params = {'name': name, 'filePerBand': file_per_band}
if scale is None:
scale = ee_object.projection().nominalScale().multiply(10)
params['scale'] = scale
if region is None:
region = ee_object.geometry()
params['region'] = region
if crs is not None:
params['crs'] = crs
url = ee_object.getDownloadURL(params)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading.')
return
with open(filename_zip, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
except Exception as e:
print('An error occurred while downloading.')
print(e)
return
try:
z = zipfile.ZipFile(filename_zip)
z.extractall(os.path.dirname(filename))
z.close()
os.remove(filename_zip)
if file_per_band:
print('Data downloaded to {}'.format(os.path.dirname(filename)))
else:
print('Data downloaded to {}'.format(filename))
except Exception as e:
print(e)
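# Illustrative sketch (USGS/SRTMGL1_003 is a public asset; clip to a small
# region to stay under the direct-download size limit):
#   image = ee.Image('USGS/SRTMGL1_003')
#   region = ee.Geometry.Rectangle([-115.5, 35.9, -114.3, 36.4])
#   ee_export_image(image, filename='srtm.tif', scale=90, region=region)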
def ee_export_image_collection(ee_object, out_dir, scale=None, crs=None, region=None, file_per_band=False):
"""Exports an ImageCollection as GeoTIFFs.
Args:
ee_object (object): The ee.ImageCollection to download.
out_dir (str): The output directory for the exported images.
scale (float, optional): A default scale to use for any bands that do not specify one; ignored if crs and crs_transform are specified. Defaults to None.
crs (str, optional): A default CRS string to use for any bands that do not explicitly specify one. Defaults to None.
region (object, optional): A polygon specifying a region to download; ignored if crs and crs_transform are specified. Defaults to None.
file_per_band (bool, optional): Whether to produce a different GeoTIFF per band. Defaults to False.
"""
import requests
import zipfile
# ee_initialize()
if not isinstance(ee_object, ee.ImageCollection):
print('The ee_object must be an ee.ImageCollection.')
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
try:
count = int(ee_object.size().getInfo())
print("Total number of images: {}\n".format(count))
for i in range(0, count):
image = ee.Image(ee_object.toList(count).get(i))
name = image.get('system:index').getInfo() + '.tif'
filename = os.path.join(os.path.abspath(out_dir), name)
print('Exporting {}/{}: {}'.format(i+1, count, name))
ee_export_image(image, filename=filename, scale=scale,
crs=crs, region=region, file_per_band=file_per_band)
print('\n')
except Exception as e:
print(e)
def ee_export_image_to_drive(ee_object, description, folder=None, region=None, scale=None, crs=None, max_pixels=1.0E13, file_format='GeoTIFF'):
"""Creates a batch task to export an Image as a raster to Google Drive.
Args:
ee_object (object): The image to export.
description (str): A human-readable name of the task.
folder (str, optional): The Google Drive Folder that the export will reside in. Defaults to None.
region (object, optional): A LinearRing, Polygon, or coordinates representing region to export. These may be specified as the Geometry objects or coordinates serialized as a string. If not specified, the region defaults to the viewport at the time of invocation. Defaults to None.
        scale (float, optional): Resolution in meters per pixel. Defaults to 10 times the image resolution.
        crs (str, optional): CRS to use for the exported image. Defaults to None.
max_pixels (int, optional): Restrict the number of pixels in the export. Defaults to 1.0E13.
file_format (str, optional): The string file format to which the image is exported. Currently only 'GeoTIFF' and 'TFRecord' are supported. Defaults to 'GeoTIFF'.
"""
# ee_initialize()
if not isinstance(ee_object, ee.Image):
print('The ee_object must be an ee.Image.')
return
try:
params = {}
if folder is not None:
params['driveFolder'] = folder
if region is not None:
params['region'] = region
if scale is None:
scale = ee_object.projection().nominalScale().multiply(10)
params['scale'] = scale
if crs is not None:
params['crs'] = crs
params['maxPixels'] = max_pixels
params['fileFormat'] = file_format
task = ee.batch.Export.image(ee_object, description, params)
task.start()
print('Exporting {} ...'.format(description))
except Exception as e:
print(e)
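# Example usage of ee_export_image_to_drive (illustrative sketch; the asset id,
# Drive folder, and region are placeholders):
#
#   image = ee.Image('USGS/SRTMGL1_003')
#   region = ee.Geometry.Rectangle([-115.5, 35.9, -114.3, 36.4])
#   ee_export_image_to_drive(image, description='srtm_export', folder='gee',
#                            region=region, scale=90)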
def ee_export_image_collection_to_drive(ee_object, descriptions=None, folder=None, region=None, scale=None, crs=None, max_pixels=1.0E13, file_format='GeoTIFF'):
"""Creates a batch task to export an ImageCollection as raster images to Google Drive.
Args:
ee_object (object): The image to export.
descriptions (list): A list of human-readable names of the tasks.
folder (str, optional): The Google Drive Folder that the export will reside in. Defaults to None.
region (object, optional): A LinearRing, Polygon, or coordinates representing region to export. These may be specified as the Geometry objects or coordinates serialized as a string. If not specified, the region defaults to the viewport at the time of invocation. Defaults to None.
        scale (float, optional): Resolution in meters per pixel. Defaults to 10 times the image resolution.
        crs (str, optional): CRS to use for the exported image. Defaults to None.
max_pixels (int, optional): Restrict the number of pixels in the export. Defaults to 1.0E13.
file_format (str, optional): The string file format to which the image is exported. Currently only 'GeoTIFF' and 'TFRecord' are supported. Defaults to 'GeoTIFF'.
"""
# ee_initialize()
if not isinstance(ee_object, ee.ImageCollection):
print('The ee_object must be an ee.ImageCollection.')
return
try:
count = int(ee_object.size().getInfo())
print("Total number of images: {}\n".format(count))
if (descriptions is not None) and (len(descriptions) != count):
print('The number of descriptions is not equal to the number of images.')
return
if descriptions is None:
descriptions = ee_object.aggregate_array('system:index').getInfo()
images = ee_object.toList(count)
for i in range(0, count):
image = ee.Image(images.get(i))
name = descriptions[i]
ee_export_image_to_drive(
image, name, folder, region, scale, crs, max_pixels, file_format)
except Exception as e:
print(e)
def ee_to_numpy(ee_object, bands=None, region=None, properties=None, default_value=None):
"""Extracts a rectangular region of pixels from an image into a 2D numpy array per band.
Args:
ee_object (object): The image to sample.
bands (list, optional): The list of band names to extract. Please make sure that all bands have the same spatial resolution. Defaults to None.
region (object, optional): The region whose projected bounding box is used to sample the image. The maximum number of pixels you can export is 262,144. Resampling and reprojecting all bands to a fixed scale can be useful. Defaults to the footprint in each band.
properties (list, optional): The properties to copy over from the sampled image. Defaults to all non-system properties.
default_value (float, optional): A default value used when a sampled pixel is masked or outside a band's footprint. Defaults to None.
Returns:
array: A 3D numpy array.
"""
import numpy as np
if not isinstance(ee_object, ee.Image):
print('The input must be an ee.Image.')
return
if region is None:
region = ee_object.geometry()
try:
if bands is not None:
ee_object = ee_object.select(bands)
else:
bands = ee_object.bandNames().getInfo()
band_count = len(bands)
band_arrs = ee_object.sampleRectangle(
region=region, properties=properties, defaultValue=default_value)
band_values = []
for band in bands:
band_arr = band_arrs.get(band).getInfo()
band_value = np.array(band_arr)
band_values.append(band_value)
image = np.dstack(band_values)
return image
except Exception as e:
print(e)
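# Example usage of ee_to_numpy (illustrative sketch; keep the region small,
# since sampleRectangle limits requests to 262,144 pixels):
#
#   image = ee.Image('USGS/SRTMGL1_003')
#   region = ee.Geometry.Rectangle([-115.0, 36.0, -114.9, 36.1])
#   arr = ee_to_numpy(image, bands=['elevation'], region=region, default_value=0)
#   print(arr.shape)  # (rows, cols, bands)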
def download_ee_video(collection, video_args, out_gif):
"""Downloads a video thumbnail as a GIF image from Earth Engine.
Args:
collection (object): An ee.ImageCollection.
        video_args (object): Parameters for exporting the video thumbnail.
out_gif (str): File path to the output GIF.
"""
import requests
out_gif = os.path.abspath(out_gif)
if not out_gif.endswith(".gif"):
print('The output file must have an extension of .gif.')
return
if not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
if 'region' in video_args.keys():
roi = video_args['region']
if not isinstance(roi, ee.Geometry):
try:
roi = roi.geometry()
except Exception as e:
print('Could not convert the provided roi to ee.Geometry')
print(e)
return
video_args['region'] = roi
try:
print('Generating URL...')
url = collection.getVideoThumbURL(video_args)
print('Downloading GIF image from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading.')
return
else:
with open(out_gif, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
print('The GIF image has been saved to: {}'.format(out_gif))
except Exception as e:
print(e)
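# Example usage of download_ee_video (illustrative sketch; the region, dates,
# and visualization parameters are placeholders, and each image must be
# visualized to 8-bit RGB before requesting a video thumbnail):
#
#   roi = ee.Geometry.Rectangle([-115.5, 35.9, -114.3, 36.4])
#   col = ee.ImageCollection('COPERNICUS/S2') \
#       .filterBounds(roi) \
#       .filterDate('2019-06-01', '2019-07-01') \
#       .map(lambda img: img.visualize(bands=['B4', 'B3', 'B2'], min=0, max=3000))
#   video_args = {'dimensions': 500, 'region': roi,
#                 'framesPerSecond': 5, 'crs': 'EPSG:3857'}
#   download_ee_video(col, video_args, 'timelapse.gif')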
def zonal_statistics(in_value_raster, in_zone_vector, out_file_path, statistics_type='MEAN', scale=None, crs=None, tile_scale=1.0, **kwargs):
"""Summarizes the values of a raster within the zones of another dataset and exports the results as a csv, shp, json, kml, or kmz.
Args:
in_value_raster (object): An ee.Image that contains the values on which to calculate a statistic.
in_zone_vector (object): An ee.FeatureCollection that defines the zones.
out_file_path (str): Output file path that will contain the summary of the values in each zone. The file type can be: csv, shp, json, kml, kmz
statistics_type (str, optional): Statistic type to be calculated. Defaults to 'MEAN'. For 'HIST', you can provide three parameters: max_buckets, min_bucket_width, and max_raw. For 'FIXED_HIST', you must provide three parameters: hist_min, hist_max, and hist_steps.
scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.
crs (str, optional): The projection to work in. If unspecified, the projection of the image's first band is used. If specified in addition to scale, rescaled to the specified scale. Defaults to None.
tile_scale (float, optional): A scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. Defaults to 1.0.
"""
if not isinstance(in_value_raster, ee.Image):
print('The input raster must be an ee.Image.')
return
if not isinstance(in_zone_vector, ee.FeatureCollection):
print('The input zone data must be an ee.FeatureCollection.')
return
allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
filename = os.path.abspath(out_file_path)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
if not (filetype in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
# Parameters for histogram
# The maximum number of buckets to use when building a histogram; will be rounded up to a power of 2.
max_buckets = None
# The minimum histogram bucket width, or null to allow any power of 2.
min_bucket_width = None
# The number of values to accumulate before building the initial histogram.
max_raw = None
hist_min = 1.0 # The lower (inclusive) bound of the first bucket.
hist_max = 100.0 # The upper (exclusive) bound of the last bucket.
hist_steps = 10 # The number of buckets to use.
if 'max_buckets' in kwargs.keys():
max_buckets = kwargs['max_buckets']
if 'min_bucket_width' in kwargs.keys():
        min_bucket_width = kwargs['min_bucket_width']
if 'max_raw' in kwargs.keys():
max_raw = kwargs['max_raw']
if statistics_type.upper() == 'FIXED_HIST' and ('hist_min' in kwargs.keys()) and ('hist_max' in kwargs.keys()) and ('hist_steps' in kwargs.keys()):
hist_min = kwargs['hist_min']
hist_max = kwargs['hist_max']
hist_steps = kwargs['hist_steps']
elif statistics_type.upper() == 'FIXED_HIST':
print('To use fixedHistogram, please provide these three parameters: hist_min, hist_max, and hist_steps.')
return
allowed_statistics = {
'MEAN': ee.Reducer.mean(),
'MAXIMUM': ee.Reducer.max(),
'MEDIAN': ee.Reducer.median(),
'MINIMUM': ee.Reducer.min(),
'STD': ee.Reducer.stdDev(),
'MIN_MAX': ee.Reducer.minMax(),
'SUM': ee.Reducer.sum(),
'VARIANCE': ee.Reducer.variance(),
'HIST': ee.Reducer.histogram(maxBuckets=max_buckets, minBucketWidth=min_bucket_width, maxRaw=max_raw),
'FIXED_HIST': ee.Reducer.fixedHistogram(hist_min, hist_max, hist_steps)
}
if not (statistics_type.upper() in allowed_statistics.keys()):
print('The statistics type must be one of the following: {}'.format(
', '.join(list(allowed_statistics.keys()))))
return
if scale is None:
scale = in_value_raster.projection().nominalScale().multiply(10)
try:
print('Computing statistics ...')
result = in_value_raster.reduceRegions(
collection=in_zone_vector, reducer=allowed_statistics[statistics_type], scale=scale, crs=crs, tileScale=tile_scale)
ee_export_vector(result, filename)
except Exception as e:
print(e)
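# Example usage of zonal_statistics (illustrative sketch; the datasets, scale,
# and output path are placeholders):
#
#   dem = ee.Image('USGS/SRTMGL1_003')
#   states = ee.FeatureCollection('TIGER/2018/States')
#   zonal_statistics(dem, states, 'mean_elevation.csv',
#                    statistics_type='MEAN', scale=1000)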
def zonal_statistics_by_group(in_value_raster, in_zone_vector, out_file_path, statistics_type='SUM', decimal_places=0, denominator=1.0, scale=None, crs=None, tile_scale=1.0):
"""Summarizes the area or percentage of a raster by group within the zones of another dataset and exports the results as a csv, shp, json, kml, or kmz.
Args:
in_value_raster (object): An integer Image that contains the values on which to calculate area/percentage.
in_zone_vector (object): An ee.FeatureCollection that defines the zones.
out_file_path (str): Output file path that will contain the summary of the values in each zone. The file type can be: csv, shp, json, kml, kmz
        statistics_type (str, optional): Can be either 'SUM' or 'PERCENTAGE'. Defaults to 'SUM'.
        decimal_places (int, optional): The number of decimal places to use. Defaults to 0.
        denominator (float, optional): To convert area units (e.g., from square meters to square kilometers). Defaults to 1.0.
scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.
crs (str, optional): The projection to work in. If unspecified, the projection of the image's first band is used. If specified in addition to scale, rescaled to the specified scale. Defaults to None.
tile_scale (float, optional): A scaling factor used to reduce aggregation tile size; using a larger tileScale (e.g. 2 or 4) may enable computations that run out of memory with the default. Defaults to 1.0.
"""
if not isinstance(in_value_raster, ee.Image):
print('The input raster must be an ee.Image.')
return
band_count = in_value_raster.bandNames().size().getInfo()
band_name = ''
if band_count == 1:
band_name = in_value_raster.bandNames().get(0)
else:
print('The input image can only have one band.')
return
band_types = in_value_raster.bandTypes().get(band_name).getInfo()
band_type = band_types.get('precision')
if band_type != 'int':
print('The input image band must be integer type.')
return
if not isinstance(in_zone_vector, ee.FeatureCollection):
print('The input zone data must be an ee.FeatureCollection.')
return
allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
filename = os.path.abspath(out_file_path)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:]
if not (filetype.lower() in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
out_dir = os.path.dirname(filename)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
allowed_statistics = ['SUM', 'PERCENTAGE']
if not (statistics_type.upper() in allowed_statistics):
print('The statistics type can only be one of {}'.format(
', '.join(allowed_statistics)))
return
if scale is None:
scale = in_value_raster.projection().nominalScale().multiply(10)
try:
print('Computing ... ')
geometry = in_zone_vector.geometry()
hist = in_value_raster.reduceRegion(ee.Reducer.frequencyHistogram(
), geometry=geometry, bestEffort=True, scale=scale)
class_values = ee.Dictionary(hist.get(band_name)).keys().map(
lambda v: ee.Number.parse(v)).sort()
class_names = class_values.map(
lambda c: ee.String('Class_').cat(ee.Number(c).format()))
class_count = class_values.size().getInfo()
dataset = ee.Image.pixelArea().divide(denominator).addBands(in_value_raster)
init_result = dataset.reduceRegions(**{
'collection': in_zone_vector,
'reducer': ee.Reducer.sum().group(**{
'groupField': 1,
'groupName': 'group',
}),
'scale': scale
})
def build_dict(input_list):
decimal_format = '%.{}f'.format(decimal_places)
in_dict = input_list.map(lambda x: ee.Dictionary().set(ee.String('Class_').cat(
ee.Number(ee.Dictionary(x).get('group')).format()), ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format))))
return in_dict
def get_keys(input_list):
return input_list.map(lambda x: ee.String('Class_').cat(ee.Number(ee.Dictionary(x).get('group')).format()))
def get_values(input_list):
decimal_format = '%.{}f'.format(decimal_places)
return input_list.map(lambda x: ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format)))
def set_attribute(f):
groups = ee.List(f.get('groups'))
keys = get_keys(groups)
values = get_values(groups)
total_area = ee.List(values).reduce(ee.Reducer.sum())
def get_class_values(x):
cls_value = ee.Algorithms.If(
keys.contains(x), values.get(keys.indexOf(x)), 0)
cls_value = ee.Algorithms.If(ee.String(statistics_type).compareTo(ee.String(
'SUM')), ee.Number(cls_value).divide(ee.Number(total_area)), cls_value)
return cls_value
full_values = class_names.map(lambda x: get_class_values(x))
attr_dict = ee.Dictionary.fromLists(class_names, full_values)
attr_dict = attr_dict.set('Class_sum', total_area)
return f.set(attr_dict).set('groups', None)
final_result = init_result.map(set_attribute)
ee_export_vector(final_result, filename)
except Exception as e:
print(e)
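# Example usage of zonal_statistics_by_group (illustrative sketch; NLCD land
# cover is an integer-valued image, as this function requires, and the
# denominator converts square meters to square kilometers):
#
#   landcover = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
#   states = ee.FeatureCollection('TIGER/2018/States')
#   zonal_statistics_by_group(landcover, states, 'landcover_km2.csv',
#                             statistics_type='SUM', denominator=1e6, scale=1000)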
def create_colorbar(width=150, height=30, palette=['blue', 'green', 'red'], add_ticks=True, add_labels=True, labels=None, vertical=False, out_file=None, font_type='arial.ttf', font_size=12, font_color='black', add_outline=True, outline_color='black'):
"""Creates a colorbar based on the provided palette.
Args:
width (int, optional): Width of the colorbar in pixels. Defaults to 150.
height (int, optional): Height of the colorbar in pixels. Defaults to 30.
palette (list, optional): Palette for the colorbar. Each color can be provided as a string (e.g., 'red'), a hex string (e.g., '#ff0000'), or an RGB tuple (255, 0, 255). Defaults to ['blue', 'green', 'red'].
add_ticks (bool, optional): Whether to add tick markers to the colorbar. Defaults to True.
add_labels (bool, optional): Whether to add labels to the colorbar. Defaults to True.
labels (list, optional): A list of labels to add to the colorbar. Defaults to None.
vertical (bool, optional): Whether to rotate the colorbar vertically. Defaults to False.
out_file (str, optional): File path to the output colorbar in png format. Defaults to None.
font_type (str, optional): Font type to use for labels. Defaults to 'arial.ttf'.
font_size (int, optional): Font size to use for labels. Defaults to 12.
font_color (str, optional): Font color to use for labels. Defaults to 'black'.
add_outline (bool, optional): Whether to add an outline to the colorbar. Defaults to True.
outline_color (str, optional): Color for the outline of the colorbar. Defaults to 'black'.
Returns:
str: File path of the output colorbar in png format.
"""
import decimal
import io
import pkg_resources
import warnings
from colour import Color
from PIL import Image, ImageDraw, ImageFont
warnings.simplefilter('ignore')
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
if out_file is None:
filename = 'colorbar_' + random_string() + '.png'
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
out_file = os.path.join(out_dir, filename)
elif not out_file.endswith('.png'):
print('The output file must end with .png')
return
else:
out_file = os.path.abspath(out_file)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
im = Image.new('RGBA', (width, height))
ld = im.load()
def float_range(start, stop, step):
while start < stop:
yield float(start)
start += decimal.Decimal(step)
n_colors = len(palette)
decimal_places = 2
rgb_colors = [Color(check_color(c)).rgb for c in palette]
keys = [round(c, decimal_places)
for c in list(float_range(0, 1.0001, 1.0/(n_colors - 1)))]
heatmap = []
for index, item in enumerate(keys):
pair = [item, rgb_colors[index]]
heatmap.append(pair)
def gaussian(x, a, b, c, d=0):
return a * math.exp(-(x - b)**2 / (2 * c**2)) + d
def pixel(x, width=100, map=[], spread=1):
width = float(width)
r = sum([gaussian(x, p[1][0], p[0] * width, width/(spread*len(map)))
for p in map])
g = sum([gaussian(x, p[1][1], p[0] * width, width/(spread*len(map)))
for p in map])
b = sum([gaussian(x, p[1][2], p[0] * width, width/(spread*len(map)))
for p in map])
return min(1.0, r), min(1.0, g), min(1.0, b)
for x in range(im.size[0]):
r, g, b = pixel(x, width=width, map=heatmap)
r, g, b = [int(256*v) for v in (r, g, b)]
for y in range(im.size[1]):
ld[x, y] = r, g, b
if add_outline:
draw = ImageDraw.Draw(im)
draw.rectangle([(0, 0), (width-1, height-1)],
outline=check_color(outline_color))
del draw
if add_ticks:
tick_length = height * 0.1
x = [key * width for key in keys]
y_top = height - tick_length
y_bottom = height
draw = ImageDraw.Draw(im)
for i in x:
shape = [(i, y_top), (i, y_bottom)]
draw.line(shape, fill='black', width=0)
del draw
if vertical:
im = im.transpose(Image.ROTATE_90)
width, height = im.size
if labels is None:
labels = [str(c) for c in keys]
elif len(labels) == 2:
try:
lowerbound = float(labels[0])
upperbound = float(labels[1])
step = (upperbound - lowerbound) / (len(palette) - 1)
labels = [str(lowerbound + c * step)
for c in range(0, len(palette))]
except Exception as e:
print(e)
print('The labels are invalid.')
return
elif len(labels) == len(palette):
labels = [str(c) for c in labels]
else:
print('The labels must have the same length as the palette.')
return
if add_labels:
default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')
if font_type == 'arial.ttf':
font = ImageFont.truetype(default_font, font_size)
else:
try:
font_list = system_fonts(show_full_path=True)
font_names = [os.path.basename(f) for f in font_list]
if (font_type in font_list) or (font_type in font_names):
font = ImageFont.truetype(font_type, font_size)
else:
print(
'The specified font type could not be found on your system. Using the default font instead.')
font = ImageFont.truetype(default_font, font_size)
except Exception as e:
print(e)
font = ImageFont.truetype(default_font, font_size)
font_color = check_color(font_color)
draw = ImageDraw.Draw(im)
w, h = draw.textsize(labels[0], font=font)
for label in labels:
w_tmp, h_tmp = draw.textsize(label, font)
if w_tmp > w:
w = w_tmp
if h_tmp > h:
h = h_tmp
W, H = width + w * 2, height + h * 2
background = Image.new('RGBA', (W, H))
draw = ImageDraw.Draw(background)
if vertical:
xy = (0, h)
else:
xy = (w, 0)
background.paste(im, xy, im)
for index, label in enumerate(labels):
w_tmp, h_tmp = draw.textsize(label, font)
if vertical:
spacing = 5
x = width + spacing
y = int(height + h - keys[index] * height - h_tmp / 2 - 1)
draw.text((x, y), label, font=font, fill=font_color)
else:
x = int(keys[index] * width + w - w_tmp / 2)
spacing = int(h * 0.05)
y = height + spacing
draw.text((x, y), label, font=font, fill=font_color)
im = background.copy()
im.save(out_file)
return out_file
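# Example usage of create_colorbar (illustrative sketch; the output path is a
# placeholder, and a two-item labels list is interpolated across the palette):
#
#   create_colorbar(width=250, height=30, palette=['blue', 'green', 'red'],
#                   labels=['0', '5000'], out_file='colorbar.png')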
def naip_timeseries(roi=None, start_year=2009, end_year=2018):
"""Creates NAIP annual timeseries
Args:
roi (object, optional): An ee.Geometry representing the region of interest. Defaults to None.
start_year (int, optional): Starting year for the timeseries. Defaults to2009.
end_year (int, optional): Ending year for the timeseries. Defaults to 2018.
Returns:
object: An ee.ImageCollection representing annual NAIP imagery.
"""
# ee_initialize()
try:
def get_annual_NAIP(year):
try:
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
if roi is not None:
collection = collection.filterBounds(roi)
start_date = ee.Date.fromYMD(year, 1, 1)
end_date = ee.Date.fromYMD(year, 12, 31)
naip = collection.filterDate(start_date, end_date) \
.filter(ee.Filter.listContains("system:band_names", "N"))
naip = ee.Image(ee.ImageCollection(naip).mosaic())
return naip
except Exception as e:
print(e)
years = ee.List.sequence(start_year, end_year)
collection = years.map(get_annual_NAIP)
return collection
except Exception as e:
print(e)
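# Example usage of naip_timeseries (illustrative sketch; the region is a
# placeholder):
#
#   roi = ee.Geometry.Rectangle([-99.2, 46.7, -99.1, 46.8])
#   naip = naip_timeseries(roi, start_year=2010, end_year=2015)
#   print(naip.size().getInfo())  # number of annual mosaics in the ee.List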
def sentinel2_timeseries(roi=None, start_year=2015, end_year=2019, start_date='01-01', end_date='12-31'):
"""Generates an annual Sentinel 2 ImageCollection. This algorithm is adapted from https://gist.github.com/jdbcode/76b9ac49faf51627ebd3ff988e10adbc. A huge thank you to Justin Braaten for sharing his fantastic work.
Images include both level 1C and level 2A imagery.
Args:
roi (object, optional): Region of interest to create the timelapse. Defaults to None.
start_year (int, optional): Starting year for the timelapse. Defaults to 2015.
end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '01-01'.
end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '12-31'.
Returns:
object: Returns an ImageCollection containing annual Sentinel 2 images.
"""
################################################################################
################################################################################
# Input and output parameters.
import re
import datetime
# ee_initialize()
if roi is None:
# roi = ee.Geometry.Polygon(
# [[[-180, -80],
# [-180, 80],
# [180, 80],
# [180, -80],
# [-180, -80]]], None, False)
roi = ee.Geometry.Polygon(
[[[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718]]], None, False)
if not isinstance(roi, ee.Geometry):
try:
roi = roi.geometry()
except Exception as e:
print('Could not convert the provided roi to ee.Geometry')
print(e)
return
# Adjusts longitudes less than -180 degrees or greater than 180 degrees.
geojson = ee_to_geojson(roi)
geojson = adjust_longitude(geojson)
roi = ee.Geometry(geojson)
################################################################################
# Setup vars to get dates.
if isinstance(start_year, int) and (start_year >= 2015) and (start_year <= 2020):
pass
else:
        print('The start year must be an integer between 2015 and 2020.')
return
if isinstance(end_year, int) and (end_year >= 2015) and (end_year <= 2020):
pass
else:
        print('The end year must be an integer between 2015 and 2020.')
return
if re.match("[0-9]{2}\-[0-9]{2}", start_date) and re.match("[0-9]{2}\-[0-9]{2}", end_date):
pass
else:
        print('The start date and end date must be in month-day format, such as 06-10, 09-20.')
return
try:
datetime.datetime(int(start_year), int(
start_date[:2]), int(start_date[3:5]))
datetime.datetime(int(end_year), int(end_date[:2]), int(end_date[3:5]))
except Exception as e:
print('The input dates are invalid.')
print(e)
return
try:
start_test = datetime.datetime(int(start_year), int(
start_date[:2]), int(start_date[3:5]))
end_test = datetime.datetime(
int(end_year), int(end_date[:2]), int(end_date[3:5]))
if start_test > end_test:
raise ValueError('Start date must be prior to end date')
except Exception as e:
print(e)
return
def days_between(d1, d2):
d1 = datetime.datetime.strptime(d1, "%Y-%m-%d")
d2 = datetime.datetime.strptime(d2, "%Y-%m-%d")
return abs((d2 - d1).days)
n_days = days_between(str(start_year) + '-' + start_date,
str(start_year) + '-' + end_date)
start_month = int(start_date[:2])
start_day = int(start_date[3:5])
start_date = str(start_year) + '-' + start_date
end_date = str(end_year) + '-' + end_date
# Define a collection filter by date, bounds, and quality.
def colFilter(col, aoi): # , startDate, endDate):
return(col.filterBounds(aoi))
# Get Sentinel 2 collections, both Level-1C (top of atmophere) and Level-2A (surface reflectance)
MSILCcol = ee.ImageCollection('COPERNICUS/S2')
MSI2Acol = ee.ImageCollection('COPERNICUS/S2_SR')
# Define a collection filter by date, bounds, and quality.
def colFilter(col, roi, start_date, end_date):
return(col
.filterBounds(roi)
.filterDate(start_date, end_date))
# .filter('CLOUD_COVER < 5')
# .filter('GEOMETRIC_RMSE_MODEL < 15')
# .filter('IMAGE_QUALITY == 9 || IMAGE_QUALITY_OLI == 9'))
# Function to get and rename bands of interest from MSI
def renameMSI(img):
return(img.select(
['B2', 'B3', 'B4', 'B5', 'B6', 'B7',
'B8', 'B8A', 'B11', 'B12', 'QA60'],
['Blue', 'Green', 'Red', 'Red Edge 1', 'Red Edge 2', 'Red Edge 3', 'NIR', 'Red Edge 4', 'SWIR1', 'SWIR2', 'QA60']))
# Add NBR for LandTrendr segmentation.
def calcNbr(img):
return(img.addBands(img.normalizedDifference(['NIR', 'SWIR2'])
.multiply(-10000).rename('NBR')).int16())
# Define function to mask out clouds and cloud shadows in images.
# Use CFmask band included in USGS Landsat SR image product.
def fmask(img):
cloudOpaqueBitMask = 1 << 10
cloudCirrusBitMask = 1 << 11
qa = img.select('QA60')
mask = qa.bitwiseAnd(cloudOpaqueBitMask).eq(0) \
.And(qa.bitwiseAnd(cloudCirrusBitMask).eq(0))
return(img.updateMask(mask))
# Define function to prepare MSI images.
def prepMSI(img):
orig = img
img = renameMSI(img)
img = fmask(img)
return(ee.Image(img.copyProperties(orig, orig.propertyNames()))
.resample('bicubic'))
# Get annual median collection.
def getAnnualComp(y):
startDate = ee.Date.fromYMD(
ee.Number(y), ee.Number(start_month), ee.Number(start_day))
endDate = startDate.advance(ee.Number(n_days), 'day')
# Filter collections and prepare them for merging.
MSILCcoly = colFilter(MSILCcol, roi, startDate, endDate).map(prepMSI)
MSI2Acoly = colFilter(MSI2Acol, roi, startDate, endDate).map(prepMSI)
# Merge the collections.
col = MSILCcoly.merge(MSI2Acoly)
yearImg = col.median()
nBands = yearImg.bandNames().size()
yearImg = ee.Image(ee.Algorithms.If(
nBands,
yearImg,
dummyImg))
return(calcNbr(yearImg)
.set({'year': y, 'system:time_start': startDate.millis(), 'nBands': nBands}))
################################################################################
# Make a dummy image for missing years.
bandNames = ee.List(['Blue', 'Green', 'Red', 'Red Edge 1',
'Red Edge 2', 'Red Edge 3', 'NIR',
'Red Edge 4', 'SWIR1', 'SWIR2', 'QA60'])
fillerValues = ee.List.repeat(0, bandNames.size())
dummyImg = ee.Image.constant(fillerValues).rename(bandNames) \
.selfMask().int16()
################################################################################
# Get a list of years
years = ee.List.sequence(start_year, end_year)
################################################################################
# Make list of annual image composites.
imgList = years.map(getAnnualComp)
# Convert image composite list to collection
imgCol = ee.ImageCollection.fromImages(imgList)
imgCol = imgCol.map(lambda img: img.clip(roi))
return imgCol
def landsat_timeseries(roi=None, start_year=1984, end_year=2020, start_date='06-10', end_date='09-20', apply_fmask=True):
"""Generates an annual Landsat ImageCollection. This algorithm is adapted from https://gist.github.com/jdbcode/76b9ac49faf51627ebd3ff988e10adbc. A huge thank you to Justin Braaten for sharing his fantastic work.
Args:
roi (object, optional): Region of interest to create the timelapse. Defaults to None.
start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
end_year (int, optional): Ending year for the timelapse. Defaults to 2020.
start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
        apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated clouds, cloud shadows, snow, and water masking. Defaults to True.
Returns:
object: Returns an ImageCollection containing annual Landsat images.
"""
################################################################################
# Input and output parameters.
import re
import datetime
if roi is None:
roi = ee.Geometry.Polygon(
[[[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718]]], None, False)
if not isinstance(roi, ee.Geometry):
try:
roi = roi.geometry()
except Exception as e:
print('Could not convert the provided roi to ee.Geometry')
print(e)
return
################################################################################
# Setup vars to get dates.
if isinstance(start_year, int) and (start_year >= 1984) and (start_year < 2020):
pass
else:
        print('The start year must be an integer between 1984 and 2019.')
return
if isinstance(end_year, int) and (end_year > 1984) and (end_year <= 2020):
pass
else:
        print('The end year must be an integer between 1985 and 2020.')
return
if re.match("[0-9]{2}\-[0-9]{2}", start_date) and re.match("[0-9]{2}\-[0-9]{2}", end_date):
pass
else:
        print('The start date and end date must be in month-day format, such as 06-10, 09-20.')
return
try:
datetime.datetime(int(start_year), int(
start_date[:2]), int(start_date[3:5]))
datetime.datetime(int(end_year), int(end_date[:2]), int(end_date[3:5]))
except Exception as e:
print('The input dates are invalid.')
return
def days_between(d1, d2):
d1 = datetime.datetime.strptime(d1, "%Y-%m-%d")
d2 = datetime.datetime.strptime(d2, "%Y-%m-%d")
return abs((d2 - d1).days)
n_days = days_between(str(start_year) + '-' + start_date,
str(start_year) + '-' + end_date)
start_month = int(start_date[:2])
start_day = int(start_date[3:5])
start_date = str(start_year) + '-' + start_date
end_date = str(end_year) + '-' + end_date
# Define a collection filter by date, bounds, and quality.
def colFilter(col, aoi): # , startDate, endDate):
return(col.filterBounds(aoi))
    # Landsat collection preprocessing.
# Get Landsat surface reflectance collections for OLI, ETM+ and TM sensors.
LC08col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
LE07col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
LT05col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
LT04col = ee.ImageCollection('LANDSAT/LT04/C01/T1_SR')
# Define a collection filter by date, bounds, and quality.
def colFilter(col, roi, start_date, end_date):
return(col
.filterBounds(roi)
.filterDate(start_date, end_date))
# .filter('CLOUD_COVER < 5')
# .filter('GEOMETRIC_RMSE_MODEL < 15')
# .filter('IMAGE_QUALITY == 9 || IMAGE_QUALITY_OLI == 9'))
# Function to get and rename bands of interest from OLI.
def renameOli(img):
return(img.select(
['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa'],
['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))
# Function to get and rename bands of interest from ETM+.
def renameEtm(img):
return(img.select(
['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa'],
['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']))
# Add NBR for LandTrendr segmentation.
def calcNbr(img):
return(img.addBands(img.normalizedDifference(['NIR', 'SWIR2'])
.multiply(-10000).rename('NBR')).int16())
# Define function to mask out clouds and cloud shadows in images.
# Use CFmask band included in USGS Landsat SR image product.
def fmask(img):
cloudShadowBitMask = 1 << 3
cloudsBitMask = 1 << 5
qa = img.select('pixel_qa')
mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0) \
.And(qa.bitwiseAnd(cloudsBitMask).eq(0))
return(img.updateMask(mask))
# Define function to prepare OLI images.
def prepOli(img):
orig = img
img = renameOli(img)
if apply_fmask:
img = fmask(img)
return (ee.Image(img.copyProperties(orig, orig.propertyNames()))
.resample('bicubic'))
# Define function to prepare ETM+ images.
def prepEtm(img):
orig = img
img = renameEtm(img)
if apply_fmask:
img = fmask(img)
return(ee.Image(img.copyProperties(orig, orig.propertyNames()))
.resample('bicubic'))
# Get annual median collection.
def getAnnualComp(y):
startDate = ee.Date.fromYMD(
ee.Number(y), ee.Number(start_month), ee.Number(start_day))
endDate = startDate.advance(ee.Number(n_days), 'day')
# Filter collections and prepare them for merging.
LC08coly = colFilter(LC08col, roi, startDate, endDate).map(prepOli)
LE07coly = colFilter(LE07col, roi, startDate, endDate).map(prepEtm)
LT05coly = colFilter(LT05col, roi, startDate, endDate).map(prepEtm)
LT04coly = colFilter(LT04col, roi, startDate, endDate).map(prepEtm)
# Merge the collections.
col = LC08coly.merge(LE07coly).merge(LT05coly).merge(LT04coly)
yearImg = col.median()
nBands = yearImg.bandNames().size()
yearImg = ee.Image(ee.Algorithms.If(
nBands,
yearImg,
dummyImg))
return(calcNbr(yearImg)
.set({'year': y, 'system:time_start': startDate.millis(), 'nBands': nBands}))
################################################################################
# Make a dummy image for missing years.
bandNames = ee.List(['Blue', 'Green', 'Red', 'NIR',
'SWIR1', 'SWIR2', 'pixel_qa'])
fillerValues = ee.List.repeat(0, bandNames.size())
dummyImg = ee.Image.constant(fillerValues).rename(bandNames) \
.selfMask().int16()
################################################################################
# Get a list of years
years = ee.List.sequence(start_year, end_year)
################################################################################
# Make list of annual image composites.
imgList = years.map(getAnnualComp)
# Convert image composite list to collection
imgCol = ee.ImageCollection.fromImages(imgList)
imgCol = imgCol.map(lambda img: img.clip(
roi).set({'coordinates': roi.coordinates()}))
return imgCol
# ################################################################################
# # Run LandTrendr.
# lt = ee.Algorithms.TemporalSegmentation.LandTrendr(
# timeSeries=imgCol.select(['NBR', 'SWIR1', 'NIR', 'Green']),
# maxSegments=10,
# spikeThreshold=0.7,
# vertexCountOvershoot=3,
# preventOneYearRecovery=True,
# recoveryThreshold=0.5,
# pvalThreshold=0.05,
# bestModelProportion=0.75,
# minObservationsNeeded=6)
# ################################################################################
# # Get fitted imagery. This starts export tasks.
# def getYearStr(year):
# return(ee.String('yr_').cat(ee.Algorithms.String(year).slice(0,4)))
# yearsStr = years.map(getYearStr)
# r = lt.select(['SWIR1_fit']).arrayFlatten([yearsStr]).toShort()
# g = lt.select(['NIR_fit']).arrayFlatten([yearsStr]).toShort()
# b = lt.select(['Green_fit']).arrayFlatten([yearsStr]).toShort()
# for i, c in zip([r, g, b], ['r', 'g', 'b']):
# descr = 'mamore-river-'+c
# name = 'users/user/'+descr
# print(name)
# task = ee.batch.Export.image.toAsset(
# image=i,
# region=roi.getInfo()['coordinates'],
# assetId=name,
# description=descr,
# scale=30,
# crs='EPSG:3857',
# maxPixels=1e13)
# task.start()
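# Example usage of landsat_timeseries (illustrative sketch; the region is a
# placeholder):
#
#   roi = ee.Geometry.Rectangle([-115.5, 35.9, -114.3, 36.4])
#   col = landsat_timeseries(roi, start_year=2000, end_year=2010,
#                            start_date='06-10', end_date='09-20')
#   print(col.size().getInfo())  # one median composite per year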
def landsat_ts_gif(roi=None, out_gif=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20', bands=['NIR', 'Red', 'Green'], vis_params=None, dimensions=768, frames_per_second=10, apply_fmask=True, nd_bands=None, nd_threshold=0, nd_palette=['black', 'blue']):
"""Generates a Landsat timelapse GIF image. This function is adapted from https://emaprlab.users.earthengine.app/view/lt-gee-time-series-animator. A huge thank you to Justin Braaten for sharing his fantastic work.
Args:
roi (object, optional): Region of interest to create the timelapse. Defaults to None.
out_gif (str, optional): File path to the output animated GIF. Defaults to None.
start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].
vis_params (dict, optional): Visualization parameters. Defaults to None.
        dimensions (int, optional): A number or a pair of numbers in the format WIDTHxHEIGHT. Maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.
frames_per_second (int, optional): Animation speed. Defaults to 10.
        apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated clouds, cloud shadows, snow, and water masking. Defaults to True.
nd_bands (list, optional): A list of names specifying the bands to use, e.g., ['Green', 'SWIR1']. The normalized difference is computed as (first − second) / (first + second). Note that negative input values are forced to 0 so that the result is confined to the range (-1, 1).
        nd_threshold (float, optional): The threshold for extracting pixels from the normalized difference band. Defaults to 0.
        nd_palette (list, optional): The color palette to use for displaying the normalized difference band. Defaults to ['black', 'blue'].
Returns:
str: File path to the output GIF image.
"""
# ee_initialize()
if roi is None:
roi = ee.Geometry.Polygon(
[[[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718]]], None, False)
elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):
roi = roi.geometry()
elif isinstance(roi, ee.Geometry):
pass
else:
        print('The provided roi is invalid. It must be an ee.Geometry, ee.Feature, or ee.FeatureCollection.')
return
if out_gif is None:
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = 'landsat_ts_' + random_string() + '.gif'
out_gif = os.path.join(out_dir, filename)
elif not out_gif.endswith('.gif'):
print('The output file must end with .gif')
return
# elif not os.path.isfile(out_gif):
# print('The output file must be a file')
# return
else:
out_gif = os.path.abspath(out_gif)
out_dir = os.path.dirname(out_gif)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
allowed_bands = ['Blue', 'Green', 'Red',
'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']
if len(bands) == 3 and all(x in allowed_bands for x in bands):
pass
else:
raise Exception('You can only select 3 bands from the following: {}'.format(
', '.join(allowed_bands)))
if nd_bands is not None:
if len(nd_bands) == 2 and all(x in allowed_bands[:-1] for x in nd_bands):
pass
else:
raise Exception('You can only select two bands from the following: {}'.format(
', '.join(allowed_bands[:-1])))
try:
col = landsat_timeseries(
roi, start_year, end_year, start_date, end_date, apply_fmask)
if vis_params is None:
vis_params = {}
vis_params['bands'] = bands
vis_params['min'] = 0
vis_params['max'] = 4000
vis_params['gamma'] = [1, 1, 1]
video_args = vis_params.copy()
video_args['dimensions'] = dimensions
video_args['region'] = roi
video_args['framesPerSecond'] = frames_per_second
video_args['crs'] = 'EPSG:3857'
if 'bands' not in video_args.keys():
video_args['bands'] = bands
if 'min' not in video_args.keys():
video_args['min'] = 0
if 'max' not in video_args.keys():
video_args['max'] = 4000
if 'gamma' not in video_args.keys():
video_args['gamma'] = [1, 1, 1]
download_ee_video(col, video_args, out_gif)
if nd_bands is not None:
nd_images = landsat_ts_norm_diff(
col, bands=nd_bands, threshold=nd_threshold)
out_nd_gif = out_gif.replace('.gif', '_nd.gif')
landsat_ts_norm_diff_gif(nd_images, out_gif=out_nd_gif, vis_params=None,
palette=nd_palette, dimensions=dimensions, frames_per_second=frames_per_second)
return out_gif
except Exception as e:
print(e)
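# Example usage of landsat_ts_gif (illustrative sketch; the region and output
# path are placeholders):
#
#   roi = ee.Geometry.Rectangle([-115.5, 35.9, -114.3, 36.4])
#   landsat_ts_gif(roi=roi, out_gif='las_vegas.gif', start_year=1985,
#                  end_year=2019, bands=['NIR', 'Red', 'Green'],
#                  frames_per_second=5)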
def minimum_bounding_box(geojson):
"""Gets the minimum bounding box for a geojson polygon.
Args:
geojson (dict): A geojson dictionary.
Returns:
tuple: Returns a tuple containing the minimum bounding box in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -120)).
"""
coordinates = []
try:
if 'geometry' in geojson.keys():
coordinates = geojson['geometry']['coordinates'][0]
else:
coordinates = geojson['coordinates'][0]
lower_left = min([x[1] for x in coordinates]), min(
[x[0] for x in coordinates]) # (lat, lon)
upper_right = max([x[1] for x in coordinates]), max([x[0]
for x in coordinates]) # (lat, lon)
bounds = (lower_left, upper_right)
return bounds
except Exception as e:
# print(e)
return None
def geocode(location, max_rows=10, reverse=False):
"""Search location by address and lat/lon coordinates.
Args:
location (str): Place name or address
max_rows (int, optional): Maximum number of records to return. Defaults to 10.
reverse (bool, optional): Search place based on coordinates. Defaults to False.
Returns:
list: Returns a list of locations.
"""
if not isinstance(location, str):
print('The location must be a string.')
return None
if not reverse:
locations = []
addresses = set()
g = geocoder.arcgis(location, maxRows=max_rows)
for result in g:
address = result.address
if not address in addresses:
addresses.add(address)
locations.append(result)
if len(locations) > 0:
return locations
else:
return None
else:
try:
if ',' in location:
latlon = [float(x) for x in location.split(',')]
elif ' ' in location:
latlon = [float(x) for x in location.split(' ')]
else:
print(
'The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
return
g = geocoder.arcgis(latlon, method='reverse')
locations = []
addresses = set()
for result in g:
address = result.address
if not address in addresses:
addresses.add(address)
locations.append(result)
if len(locations) > 0:
return locations
else:
return None
except Exception as e:
print(e)
return None
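# Example usage of geocode (illustrative sketch; results depend on the ArcGIS
# geocoding service being reachable):
#
#   for loc in geocode('Knoxville, TN', max_rows=3) or []:
#       print(loc.address, loc.lat, loc.lng)
#   geocode('35.96, -83.92', reverse=True)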
def is_latlon_valid(location):
"""Checks whether a pair of coordinates is valid.
Args:
location (str): A pair of latlon coordinates separated by comma or space.
Returns:
bool: Returns True if valid.
"""
latlon = []
if ',' in location:
latlon = [float(x) for x in location.split(',')]
elif ' ' in location:
latlon = [float(x) for x in location.split(' ')]
else:
print(
'The coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
return False
try:
lat, lon = float(latlon[0]), float(latlon[1])
        if lat >= -90 and lat <= 90 and lon >= -180 and lon <= 180:
return True
else:
return False
except Exception as e:
print(e)
return False
def latlon_from_text(location):
"""Extracts latlon from text.
Args:
location (str): A pair of latlon coordinates separated by comma or space.
Returns:
        tuple: Returns (lat, lon) if valid, otherwise None.
"""
latlon = []
try:
if ',' in location:
latlon = [float(x) for x in location.split(',')]
elif ' ' in location:
latlon = [float(x) for x in location.split(' ')]
else:
print(
'The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
return None
lat, lon = latlon[0], latlon[1]
        if lat >= -90 and lat <= 90 and lon >= -180 and lon <= 180:
return lat, lon
else:
return None
except Exception as e:
print(e)
print('The lat-lon coordinates should be numbers only and separated by comma or space, such as 40.2, -100.3')
return None
def search_ee_data(keywords):
"""Searches Earth Engine data catalog.
Args:
        keywords (str): Keywords to search for; can be an id, provider, tag, and so on.
    Returns:
        list: Returns a list of assets.
"""
try:
cmd = 'geeadd search --keywords "{}"'.format(str(keywords))
output = os.popen(cmd).read()
start_index = output.index('[')
assets = eval(output[start_index:])
results = []
for asset in assets:
asset_dates = asset['start_date'] + ' - ' + asset['end_date']
asset_snippet = asset['ee_id_snippet']
start_index = asset_snippet.index("'") + 1
end_index = asset_snippet.index("'", start_index)
asset_id = asset_snippet[start_index:end_index]
asset['dates'] = asset_dates
asset['id'] = asset_id
asset['uid'] = asset_id.replace('/', '_')
# asset['url'] = 'https://developers.google.com/earth-engine/datasets/catalog/' + asset['uid']
# asset['thumbnail'] = 'https://mw1.google.com/ges/dd/images/{}_sample.png'.format(
# asset['uid'])
results.append(asset)
return results
except Exception as e:
print(e)
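# Example usage of search_ee_data (illustrative sketch; requires the geeadd
# command-line tool that this function shells out to):
#
#   results = search_ee_data('elevation')
#   for asset in results[:3]:
#       print(asset['id'], asset['dates'])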
def ee_data_thumbnail(asset_id):
"""Retrieves the thumbnail URL of an Earth Engine asset.
Args:
asset_id (str): An Earth Engine asset id.
Returns:
        str: An HTTP URL of the thumbnail.
"""
import requests
import urllib
from bs4 import BeautifulSoup
asset_uid = asset_id.replace('/', '_')
asset_url = "https://developers.google.com/earth-engine/datasets/catalog/{}".format(
asset_uid)
thumbnail_url = 'https://mw1.google.com/ges/dd/images/{}_sample.png'.format(
asset_uid)
r = requests.get(thumbnail_url)
try:
if r.status_code != 200:
html_page = urllib.request.urlopen(asset_url)
soup = BeautifulSoup(html_page, features="html.parser")
for img in soup.findAll('img'):
if 'sample.png' in img.get('src'):
thumbnail_url = img.get('src')
return thumbnail_url
return thumbnail_url
except Exception as e:
print(e)
def ee_data_html(asset):
"""Generates HTML from an asset to be used in the HTML widget.
Args:
asset (dict): A dictionary containing an Earth Engine asset.
Returns:
str: A string containing HTML.
"""
template = '''
<html>
<body>
<h3>asset_title</h3>
<h4>Dataset Availability</h4>
<p style="margin-left: 40px">asset_dates</p>
<h4>Earth Engine Snippet</h4>
<p style="margin-left: 40px">ee_id_snippet</p>
<h4>Earth Engine Data Catalog</h4>
<p style="margin-left: 40px"><a href="asset_url" target="_blank">asset_id</a></p>
<h4>Dataset Thumbnail</h4>
<img src="thumbnail_url">
</body>
</html>
'''
try:
text = template.replace('asset_title', asset['title'])
text = text.replace('asset_dates', asset['dates'])
text = text.replace('ee_id_snippet', asset['ee_id_snippet'])
text = text.replace('asset_id', asset['id'])
text = text.replace('asset_url', asset['asset_url'])
# asset['thumbnail'] = ee_data_thumbnail(asset['id'])
text = text.replace('thumbnail_url', asset['thumbnail_url'])
return text
except Exception as e:
print(e)
def create_code_cell(code='', where='below'):
"""Creates a code cell in the IPython Notebook.
Args:
code (str, optional): Code to fill the new code cell with. Defaults to ''.
where (str, optional): Where to add the new code cell. It can be one of the following: above, below, at_bottom. Defaults to 'below'.
"""
import base64
from IPython.display import Javascript, display
encoded_code = (base64.b64encode(str.encode(code))).decode()
display(Javascript("""
var code = IPython.notebook.insert_cell_{0}('code');
code.set_text(atob("{1}"));
""".format(where, encoded_code)))
def ee_api_to_csv(outfile=None):
"""Extracts Earth Engine API documentation from https://developers.google.com/earth-engine/api_docs as a csv file.
Args:
outfile (str, optional): The output file path to a csv file. Defaults to None.
"""
import csv
import requests
from bs4 import BeautifulSoup
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
data_dir = os.path.join(pkg_dir, 'data')
template_dir = os.path.join(data_dir, 'template')
csv_file = os.path.join(template_dir, 'ee_api_docs.csv')
if outfile is None:
outfile = csv_file
else:
if not outfile.endswith('.csv'):
print('The output file must end with .csv')
return
else:
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
url = 'https://developers.google.com/earth-engine/api_docs'
try:
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
names = []
descriptions = []
functions = []
returns = []
arguments = []
types = []
details = []
names = [h2.text for h2 in soup.find_all('h2')]
descriptions = [
h2.next_sibling.next_sibling.text for h2 in soup.find_all('h2')]
func_tables = soup.find_all('table', class_='blue')
functions = [func_table.find(
'code').text for func_table in func_tables]
returns = [func_table.find_all(
'td')[1].text for func_table in func_tables]
detail_tables = []
tables = soup.find_all('table', class_='blue')
for table in tables:
item = table.next_sibling
if item.attrs == {'class': ['details']}:
detail_tables.append(item)
else:
detail_tables.append("")
for detail_table in detail_tables:
if detail_table != '':
items = [item.text for item in detail_table.find_all('code')]
else:
items = ""
arguments.append(items)
for detail_table in detail_tables:
if detail_table != '':
items = [item.text for item in detail_table.find_all('td')]
items = items[1::3]
else:
items = ""
types.append(items)
for detail_table in detail_tables:
if detail_table != '':
items = [item.text for item in detail_table.find_all('p')]
else:
items = ""
details.append(items)
csv_file = open(outfile, 'w', encoding='utf-8')
csv_writer = csv.writer(csv_file, delimiter='\t')
csv_writer.writerow(
['name', 'description', 'function', 'returns', 'argument', 'type', 'details'])
for i in range(len(names)):
name = names[i]
description = descriptions[i]
function = functions[i]
return_type = returns[i]
argument = '|'.join(arguments[i])
argu_type = '|'.join(types[i])
detail = '|'.join(details[i])
csv_writer.writerow(
[name, description, function, return_type, argument, argu_type, detail])
csv_file.close()
except Exception as e:
print(e)
def read_api_csv():
"""Extracts Earth Engine API from a csv file and returns a dictionary containing information about each function.
Returns:
dict: The dictionary containing information about each function, including name, description, function form, return type, arguments, html.
"""
import copy
import csv
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
data_dir = os.path.join(pkg_dir, 'data')
template_dir = os.path.join(data_dir, 'template')
csv_file = os.path.join(template_dir, 'ee_api_docs.csv')
html_file = os.path.join(template_dir, 'ee_api_docs.html')
with open(html_file) as f:
in_html_lines = f.readlines()
api_dict = {}
with open(csv_file, 'r', encoding='utf-8') as f:
csv_reader = csv.DictReader(f, delimiter='\t')
for line in csv_reader:
out_html_lines = copy.copy(in_html_lines)
out_html_lines[65] = in_html_lines[65].replace(
'function_name', line['name'])
out_html_lines[66] = in_html_lines[66].replace(
'function_description', line.get('description'))
out_html_lines[74] = in_html_lines[74].replace(
'function_usage', line.get('function'))
out_html_lines[75] = in_html_lines[75].replace(
'function_returns', line.get('returns'))
arguments = line.get('argument')
types = line.get('type')
details = line.get('details')
if '|' in arguments:
argument_items = arguments.split('|')
else:
argument_items = [arguments]
if '|' in types:
types_items = types.split('|')
else:
types_items = [types]
if '|' in details:
details_items = details.split('|')
else:
details_items = [details]
out_argument_lines = []
for index in range(len(argument_items)):
in_argument_lines = in_html_lines[87:92]
in_argument_lines[1] = in_argument_lines[1].replace(
'function_argument', argument_items[index])
in_argument_lines[2] = in_argument_lines[2].replace(
'function_type', types_items[index])
in_argument_lines[3] = in_argument_lines[3].replace(
'function_details', details_items[index])
out_argument_lines.append("".join(in_argument_lines))
out_html_lines = out_html_lines[:87] + \
out_argument_lines + out_html_lines[92:]
contents = ''.join(out_html_lines)
api_dict[line['name']] = {
'description': line.get('description'),
'function': line.get('function'),
'returns': line.get('returns'),
'argument': line.get('argument'),
'type': line.get('type'),
'details': line.get('details'),
'html': contents
}
return api_dict
def ee_function_tree(name):
"""Construct the tree structure based on an Earth Engine function. For example, the function "ee.Algorithms.FMask.matchClouds" will return a list ["ee.Algorithms", "ee.Algorithms.FMask", "ee.Algorithms.FMask.matchClouds"]
Args:
name (str): The name of the Earth Engine function
Returns:
list: The list for parent functions.
"""
func_list = []
try:
items = name.split('.')
if items[0] == 'ee':
for i in range(2, len(items) + 1):
func_list.append('.'.join(items[0:i]))
else:
for i in range(1, len(items) + 1):
func_list.append('.'.join(items[0:i]))
return func_list
except Exception as e:
print(e)
print('The provided function name is invalid.')
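# Example usage of ee_function_tree (illustrative sketch):
#
#   ee_function_tree('ee.Image.normalizedDifference')
#   # -> ['ee.Image', 'ee.Image.normalizedDifference']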
def build_api_tree(api_dict, output_widget, layout_width='100%'):
"""Builds an Earth Engine API tree view.
Args:
api_dict (dict): The dictionary containing information about each Earth Engine API function.
output_widget (object): An Output widget.
layout_width (str, optional): The percentage width of the widget. Defaults to '100%'.
Returns:
tuple: Returns a tuple containing two items: a tree Output widget and a tree dictionary.
"""
import warnings
warnings.filterwarnings('ignore')
tree = Tree()
tree_dict = {}
names = api_dict.keys()
def handle_click(event):
if event['new']:
name = event['owner'].name
values = api_dict[name]
with output_widget:
output_widget.clear_output()
html_widget = widgets.HTML(value=values['html'])
display(html_widget)
for name in names:
func_list = ee_function_tree(name)
first = func_list[0]
if first not in tree_dict.keys():
tree_dict[first] = Node(first)
tree_dict[first].opened = False
tree.add_node(tree_dict[first])
for index, func in enumerate(func_list):
if index > 0:
if func not in tree_dict.keys():
node = tree_dict[func_list[index - 1]]
node.opened = False
tree_dict[func] = Node(func)
node.add_node(tree_dict[func])
if index == len(func_list) - 1:
node = tree_dict[func_list[index]]
node.icon = 'file'
node.observe(handle_click, 'selected')
return tree, tree_dict
def search_api_tree(keywords, api_tree):
"""Search Earth Engine API and return functions containing the specified keywords
Args:
keywords (str): The keywords to search for.
api_tree (dict): The dictionary containing the Earth Engine API tree.
Returns:
object: An ipytree object/widget.
"""
import warnings
warnings.filterwarnings('ignore')
sub_tree = Tree()
for key in api_tree.keys():
if keywords in key:
sub_tree.add_node(api_tree[key])
return sub_tree
def ee_search(asset_limit=100):
"""Search Earth Engine API and user assets. If you received a warning (IOPub message rate exceeded) in Jupyter notebook, you can relaunch Jupyter notebook using the following command:
jupyter notebook --NotebookApp.iopub_msg_rate_limit=10000
Args:
asset_limit (int, optional): The number of assets to display for each asset type, i.e., Image, ImageCollection, and FeatureCollection. Defaults to 100.
"""
import warnings
warnings.filterwarnings('ignore')
class Flags:
def __init__(self, repos=None, docs=None, assets=None, docs_dict=None, asset_dict=None, asset_import=None):
self.repos = repos
self.docs = docs
self.assets = assets
self.docs_dict = docs_dict
self.asset_dict = asset_dict
self.asset_import = asset_import
flags = Flags()
search_type = widgets.ToggleButtons(
options=['Scripts', 'Docs', 'Assets'],
tooltips=['Search Earth Engine Scripts',
'Search Earth Engine API', 'Search Earth Engine Assets'],
button_style='primary'
)
search_type.style.button_width = '100px'
search_box = widgets.Text(
placeholder='Filter scripts...', value='Loading...')
search_box.layout.width = '310px'
tree_widget = widgets.Output()
left_widget = widgets.VBox()
right_widget = widgets.VBox()
output_widget = widgets.Output()
output_widget.layout.max_width = '650px'
search_widget = widgets.HBox()
search_widget.children = [left_widget, right_widget]
display(search_widget)
repo_tree, repo_output, _ = build_repo_tree()
left_widget.children = [search_type, repo_tree]
right_widget.children = [repo_output]
flags.repos = repo_tree
search_box.value = ''
def search_type_changed(change):
search_box.value = ''
output_widget.clear_output()
tree_widget.clear_output()
if change['new'] == 'Scripts':
search_box.placeholder = 'Filter scripts...'
left_widget.children = [search_type, repo_tree]
right_widget.children = [repo_output]
elif change['new'] == 'Docs':
search_box.placeholder = 'Filter methods...'
search_box.value = 'Loading...'
left_widget.children = [search_type, search_box, tree_widget]
right_widget.children = [output_widget]
if flags.docs is None:
api_dict = read_api_csv()
ee_api_tree, tree_dict = build_api_tree(
api_dict, output_widget)
flags.docs = ee_api_tree
flags.docs_dict = tree_dict
else:
ee_api_tree = flags.docs
with tree_widget:
tree_widget.clear_output()
display(ee_api_tree)
right_widget.children = [output_widget]
search_box.value = ''
elif change['new'] == 'Assets':
search_box.placeholder = 'Filter assets...'
left_widget.children = [search_type, search_box, tree_widget]
right_widget.children = [output_widget]
search_box.value = 'Loading...'
if flags.assets is None:
asset_tree, asset_widget, asset_dict = build_asset_tree(
limit=asset_limit)
flags.assets = asset_tree
flags.asset_dict = asset_dict
flags.asset_import = asset_widget
with tree_widget:
tree_widget.clear_output()
display(flags.assets)
right_widget.children = [flags.asset_import]
search_box.value = ''
search_type.observe(search_type_changed, names='value')
def search_box_callback(text):
if search_type.value == 'Docs':
with tree_widget:
if text.value == '':
print('Loading...')
tree_widget.clear_output(wait=True)
display(flags.docs)
else:
tree_widget.clear_output()
print('Searching...')
tree_widget.clear_output(wait=True)
sub_tree = search_api_tree(text.value, flags.docs_dict)
display(sub_tree)
elif search_type.value == 'Assets':
with tree_widget:
if text.value == '':
print('Loading...')
tree_widget.clear_output(wait=True)
display(flags.assets)
else:
tree_widget.clear_output()
print('Searching...')
tree_widget.clear_output(wait=True)
sub_tree = search_api_tree(text.value, flags.asset_dict)
display(sub_tree)
search_box.on_submit(search_box_callback)
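# Example (sketch): launch the search UI in a notebook cell; a smaller
# asset_limit lists fewer assets per asset type.
#   ee_search(asset_limit=50)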
def ee_user_id():
"""Gets Earth Engine account user id.
Returns:
str: A string containing the user id.
"""
# ee_initialize()
roots = ee.data.getAssetRoots()
if len(roots) == 0:
return None
else:
root = ee.data.getAssetRoots()[0]
user_id = root['id'].replace("projects/earthengine-legacy/assets/", "")
return user_id
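# Example (sketch; assumes an authenticated Earth Engine session with at
# least one asset root):
#   ee_user_id()  # e.g. 'users/<username>', with the legacy prefix stripped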
def build_asset_tree(limit=100):
import warnings
import geeadd.ee_report as geeadd
warnings.filterwarnings('ignore')
# ee_initialize()
tree = Tree(multiple_selection=False)
tree_dict = {}
asset_types = {}
asset_icons = {
'FOLDER': 'folder',
'TABLE': 'table',
'IMAGE': 'image',
'IMAGE_COLLECTION': 'file'
}
info_widget = widgets.HBox()
import_btn = widgets.Button(
description='import',
button_style='primary',
tooltip='Click to import the selected asset',
disabled=True
)
import_btn.layout.min_width = '57px'
import_btn.layout.max_width = '57px'
path_widget = widgets.Text()
path_widget.layout.min_width = '500px'
# path_widget.disabled = True
info_widget.children = [import_btn, path_widget]
user_id = ee_user_id()
if user_id is None:
print('Your GEE account does not have any assets. Please create a repository at https://code.earthengine.google.com')
return
user_path = 'projects/earthengine-legacy/assets/' + user_id
root_node = Node(user_id)
root_node.opened = True
tree_dict[user_id] = root_node
tree.add_node(root_node)
collection_list, table_list, image_list, folder_paths = geeadd.fparse(
user_path)
collection_list = collection_list[:limit]
table_list = table_list[:limit]
image_list = image_list[:limit]
folder_paths = folder_paths[:limit]
    # 35 == len('projects/earthengine-legacy/assets/'); strip that prefix.
    folders = [p[35:] for p in folder_paths[1:]]
asset_type = 'FOLDER'
for folder in folders:
bare_folder = folder.replace(user_id + '/', '')
if folder not in tree_dict.keys():
node = Node(bare_folder)
node.opened = False
node.icon = asset_icons[asset_type]
root_node.add_node(node)
tree_dict[folder] = node
asset_types[folder] = asset_type
def import_btn_clicked(b):
if path_widget.value != '':
dataset_uid = 'dataset_' + random_string(string_length=3)
            # Strip the trailing ') from a snippet like "ee.Image('path')".
            layer_name = path_widget.value.split('/')[-1][:-2:]
line1 = '{} = {}\n'.format(
dataset_uid, path_widget.value)
line2 = 'Map.addLayer(' + dataset_uid + \
', {}, "' + layer_name + '")'
contents = ''.join([line1, line2])
create_code_cell(contents)
import_btn.on_click(import_btn_clicked)
def handle_click(event):
if event['new']:
cur_node = event['owner']
for key in tree_dict.keys():
if cur_node is tree_dict[key]:
if asset_types[key] == 'IMAGE':
path_widget.value = "ee.Image('{}')".format(key)
elif asset_types[key] == 'IMAGE_COLLECTION':
path_widget.value = "ee.ImageCollection('{}')".format(
key)
elif asset_types[key] == 'TABLE':
path_widget.value = "ee.FeatureCollection('{}')".format(
key)
if import_btn.disabled:
import_btn.disabled = False
break
assets = [collection_list, image_list, table_list]
for index, asset_list in enumerate(assets):
if index == 0:
asset_type = 'IMAGE_COLLECTION'
elif index == 1:
asset_type = 'IMAGE'
else:
asset_type = 'TABLE'
for asset in asset_list:
items = asset.split('/')
parent = '/'.join(items[:-1])
child = items[-1]
parent_node = tree_dict[parent]
child_node = Node(child)
child_node.icon = asset_icons[asset_type]
parent_node.add_node(child_node)
tree_dict[asset] = child_node
asset_types[asset] = asset_type
child_node.observe(handle_click, 'selected')
return tree, info_widget, tree_dict
def build_repo_tree(out_dir=None, name='gee_repos'):
"""Builds a repo tree for GEE account.
Args:
out_dir (str): The output directory for the repos. Defaults to None.
name (str, optional): The output name for the repo directory. Defaults to 'gee_repos'.
Returns:
tuple: Returns a tuple containing a tree widget, an output widget, and a tree dictionary containing nodes.
"""
import warnings
warnings.filterwarnings('ignore')
if out_dir is None:
out_dir = os.path.join(os.path.expanduser('~'))
repo_dir = os.path.join(out_dir, name)
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
URLs = {
# 'Owner': 'https://earthengine.googlesource.com/{}/default'.format(ee_user_id()),
'Writer': '',
'Reader': 'https://github.com/giswqs/geemap',
'Examples': 'https://github.com/giswqs/earthengine-py-examples',
'Archive': 'https://earthengine.googlesource.com/EGU2017-EE101'
}
user_id = ee_user_id()
if user_id is not None:
URLs['Owner'] = 'https://earthengine.googlesource.com/{}/default'.format(
ee_user_id())
path_widget = widgets.Text(
placeholder='Enter the link to a Git repository here...')
path_widget.layout.width = '475px'
clone_widget = widgets.Button(
description='Clone', button_style='primary', tooltip='Clone the repository to folder.')
info_widget = widgets.HBox()
groups = ['Owner', 'Writer', 'Reader', 'Examples', 'Archive']
for group in groups:
group_dir = os.path.join(repo_dir, group)
if not os.path.exists(group_dir):
os.makedirs(group_dir)
example_dir = os.path.join(repo_dir, 'Examples/earthengine-py-examples')
if not os.path.exists(example_dir):
clone_github_repo(URLs['Examples'], out_dir=example_dir)
left_widget, right_widget, tree_dict = file_browser(
in_dir=repo_dir, add_root_node=False, search_description='Filter scripts...', use_import=True, return_sep_widgets=True)
info_widget.children = [right_widget]
def handle_folder_click(event):
if event['new']:
url = ''
selected = event['owner']
if selected.name in URLs.keys():
url = URLs[selected.name]
path_widget.value = url
clone_widget.disabled = False
info_widget.children = [path_widget, clone_widget]
else:
info_widget.children = [right_widget]
for group in groups:
dirname = os.path.join(repo_dir, group)
node = tree_dict[dirname]
node.observe(handle_folder_click, 'selected')
def handle_clone_click(b):
url = path_widget.value
default_dir = os.path.join(repo_dir, 'Examples')
if url == '':
path_widget.value = 'Please enter a valid URL to the repository.'
else:
for group in groups:
key = os.path.join(repo_dir, group)
node = tree_dict[key]
if node.selected:
default_dir = key
try:
path_widget.value = 'Cloning...'
clone_dir = os.path.join(default_dir, os.path.basename(url))
if 'github.com' in url:
clone_github_repo(url, out_dir=clone_dir)
elif 'googlesource' in url:
clone_google_repo(url, out_dir=clone_dir)
path_widget.value = 'Cloned to {}'.format(clone_dir)
clone_widget.disabled = True
except Exception as e:
                path_widget.value = 'An error occurred when trying to clone the repository: ' + \
                    str(e)
clone_widget.disabled = True
clone_widget.on_click(handle_clone_click)
return left_widget, info_widget, tree_dict
def file_browser(in_dir=None, show_hidden=False, add_root_node=True, search_description=None, use_import=False, return_sep_widgets=False):
"""Creates a simple file browser and text editor.
Args:
in_dir (str, optional): The input directory. Defaults to None, which will use the current working directory.
show_hidden (bool, optional): Whether to show hidden files/folders. Defaults to False.
add_root_node (bool, optional): Whether to add the input directory as a root node. Defaults to True.
search_description (str, optional): The description of the search box. Defaults to None.
use_import (bool, optional): Whether to show the import button. Defaults to False.
return_sep_widgets (bool, optional): Whether to return the results as separate widgets. Defaults to False.
Returns:
object: An ipywidget.
"""
import platform
if in_dir is None:
in_dir = os.getcwd()
if not os.path.exists(in_dir):
print('The provided directory does not exist.')
return
elif not os.path.isdir(in_dir):
print('The provided path is not a valid directory.')
return
sep = '/'
if platform.system() == "Windows":
sep = '\\'
if in_dir.endswith(sep):
in_dir = in_dir[:-1]
full_widget = widgets.HBox()
left_widget = widgets.VBox()
right_widget = widgets.VBox()
import_btn = widgets.Button(
description='import', button_style='primary', tooltip='import the content to a new cell', disabled=True)
import_btn.layout.width = '70px'
path_widget = widgets.Text()
path_widget.layout.min_width = '400px'
# path_widget.layout.max_width = '400px'
save_widget = widgets.Button(
description='Save', button_style='primary', tooltip='Save edits to file.', disabled=True)
info_widget = widgets.HBox()
info_widget.children = [path_widget, save_widget]
if use_import:
info_widget.children = [import_btn, path_widget, save_widget]
text_widget = widgets.Textarea()
text_widget.layout.width = '630px'
text_widget.layout.height = '600px'
right_widget.children = [info_widget, text_widget]
full_widget.children = [left_widget]
if search_description is None:
search_description = 'Search files/folders...'
search_box = widgets.Text(placeholder=search_description)
search_box.layout.width = '310px'
tree_widget = widgets.Output()
tree_widget.layout.max_width = '310px'
tree_widget.overflow = 'auto'
left_widget.children = [search_box, tree_widget]
tree = Tree(multiple_selection=False)
tree_dict = {}
def on_button_clicked(b):
content = text_widget.value
out_file = path_widget.value
out_dir = os.path.dirname(out_file)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(out_file, 'w') as f:
f.write(content)
text_widget.disabled = True
text_widget.value = 'The content has been saved successfully.'
save_widget.disabled = True
path_widget.disabled = True
if (out_file not in tree_dict.keys()) and (out_dir in tree_dict.keys()):
node = Node(os.path.basename(out_file))
tree_dict[out_file] = node
parent_node = tree_dict[out_dir]
parent_node.add_node(node)
save_widget.on_click(on_button_clicked)
def import_btn_clicked(b):
if (text_widget.value != '') and (path_widget.value.endswith('.py')):
create_code_cell(text_widget.value)
import_btn.on_click(import_btn_clicked)
def search_box_callback(text):
with tree_widget:
if text.value == '':
print('Loading...')
tree_widget.clear_output(wait=True)
display(tree)
else:
tree_widget.clear_output()
print('Searching...')
tree_widget.clear_output(wait=True)
sub_tree = search_api_tree(text.value, tree_dict)
display(sub_tree)
search_box.on_submit(search_box_callback)
def handle_file_click(event):
if event['new']:
cur_node = event['owner']
for key in tree_dict.keys():
if (cur_node is tree_dict[key]) and (os.path.isfile(key)):
if key.endswith('.py'):
import_btn.disabled = False
else:
import_btn.disabled = True
try:
with open(key) as f:
content = f.read()
text_widget.value = content
text_widget.disabled = False
path_widget.value = key
path_widget.disabled = False
save_widget.disabled = False
full_widget.children = [left_widget, right_widget]
except Exception as e:
path_widget.value = key
path_widget.disabled = True
save_widget.disabled = True
text_widget.disabled = True
text_widget.value = 'Failed to open {}.'.format(
cur_node.name) + '\n\n' + str(e)
full_widget.children = [left_widget, right_widget]
return
break
def handle_folder_click(event):
if event['new']:
full_widget.children = [left_widget]
text_widget.value = ''
if add_root_node:
root_name = in_dir.split(sep)[-1]
root_node = Node(root_name)
tree_dict[in_dir] = root_node
tree.add_node(root_node)
root_node.observe(handle_folder_click, 'selected')
    for root, d_names, f_names in os.walk(in_dir):
        if not show_hidden:
            # Prune hidden folders/files in place so os.walk skips them;
            # removing items while iterating over the same list (as the
            # previous code did) silently skips neighboring entries.
            d_names[:] = [d for d in d_names if not d.startswith('.')]
            f_names[:] = [f for f in f_names if not f.startswith('.')]
d_names.sort()
f_names.sort()
if (not add_root_node) and (root == in_dir):
for d_name in d_names:
node = Node(d_name)
tree_dict[os.path.join(in_dir, d_name)] = node
tree.add_node(node)
node.opened = False
node.observe(handle_folder_click, 'selected')
if (root != in_dir) and (root not in tree_dict.keys()):
name = root.split(sep)[-1]
dir_name = os.path.dirname(root)
parent_node = tree_dict[dir_name]
node = Node(name)
tree_dict[root] = node
parent_node.add_node(node)
node.observe(handle_folder_click, 'selected')
if len(f_names) > 0:
parent_node = tree_dict[root]
parent_node.opened = False
for f_name in f_names:
node = Node(f_name)
node.icon = 'file'
full_path = os.path.join(root, f_name)
tree_dict[full_path] = node
parent_node.add_node(node)
node.observe(handle_file_click, 'selected')
with tree_widget:
tree_widget.clear_output()
display(tree)
if return_sep_widgets:
return left_widget, right_widget, tree_dict
else:
return full_widget
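# Example (sketch): browse the current working directory in a notebook cell.
#   file_browser(in_dir=os.getcwd(), show_hidden=False)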
def check_git_install():
"""Checks if Git is installed.
Returns:
bool: Returns True if Git is installed, otherwise returns False.
"""
import webbrowser
cmd = 'git --version'
output = os.popen(cmd).read()
if 'git version' in output:
return True
else:
url = 'https://git-scm.com/downloads'
print(
"Git is not installed. Please download Git from {} and install it.".format(url))
webbrowser.open_new_tab(url)
return False
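# Example (sketch): gate a git-dependent step on the check.
#   if check_git_install():
#       clone_google_repo('https://earthengine.googlesource.com/EGU2017-EE101')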
def clone_github_repo(url, out_dir):
"""Clones a GitHub repository.
Args:
url (str): The link to the GitHub repository
out_dir (str): The output directory for the cloned repository.
"""
import zipfile
repo_name = os.path.basename(url)
# url_zip = os.path.join(url, 'archive/master.zip')
url_zip = url + '/archive/master.zip'
if os.path.exists(out_dir):
print(
'The specified output directory already exists. Please choose a new directory.')
return
parent_dir = os.path.dirname(out_dir)
out_file_path = os.path.join(parent_dir, repo_name + '.zip')
try:
urllib.request.urlretrieve(url_zip, out_file_path)
    except Exception:
print("The provided URL is invalid. Please double check the URL.")
return
with zipfile.ZipFile(out_file_path, "r") as zip_ref:
zip_ref.extractall(parent_dir)
src = out_file_path.replace('.zip', '-master')
os.rename(src, out_dir)
os.remove(out_file_path)
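# Example (sketch): download a repo's master branch as a zip and unpack it;
# the output directory must not exist yet.
#   clone_github_repo('https://github.com/giswqs/geemap',
#                     out_dir=os.path.join(os.getcwd(), 'geemap'))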
def clone_github_repo2(url, out_dir=None):
"""Clones a GitHub repository.
Args:
url (str): The link to the GitHub repository
out_dir (str, optional): The output directory for the cloned repository. Defaults to None.
"""
check_install('dulwich')
from dulwich import porcelain
repo_name = os.path.basename(url)
if out_dir is None:
out_dir = os.path.join(os.getcwd(), repo_name)
if not os.path.exists(os.path.dirname(out_dir)):
os.makedirs(os.path.dirname(out_dir))
if os.path.exists(out_dir):
print(
'The specified output directory already exists. Please choose a new directory.')
return
try:
porcelain.clone(url, out_dir)
except Exception as e:
print('Failed to clone the repository.')
print(e)
def clone_google_repo(url, out_dir=None):
"""Clones an Earth Engine repository from https://earthengine.googlesource.com, such as https://earthengine.googlesource.com/users/google/datasets
Args:
url (str): The link to the Earth Engine repository
out_dir (str, optional): The output directory for the cloned repository. Defaults to None.
"""
repo_name = os.path.basename(url)
if out_dir is None:
out_dir = os.path.join(os.getcwd(), repo_name)
if not os.path.exists(os.path.dirname(out_dir)):
os.makedirs(os.path.dirname(out_dir))
if os.path.exists(out_dir):
print(
'The specified output directory already exists. Please choose a new directory.')
return
if check_git_install():
cmd = 'git clone "{}" "{}"'.format(url, out_dir)
os.popen(cmd).read()
def reduce_gif_size(in_gif, out_gif=None):
"""Reduces a GIF image using ffmpeg.
Args:
in_gif (str): The input file path to the GIF image.
out_gif (str, optional): The output file path to the GIF image. Defaults to None.
"""
import ffmpeg
import shutil
if not is_tool('ffmpeg'):
print('ffmpeg is not installed on your computer.')
return
if not os.path.exists(in_gif):
print('The input gif file does not exist.')
return
if out_gif is None:
out_gif = in_gif
elif not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
if in_gif == out_gif:
tmp_gif = in_gif.replace('.gif', '_tmp.gif')
shutil.copyfile(in_gif, tmp_gif)
stream = ffmpeg.input(tmp_gif)
stream = ffmpeg.output(stream, in_gif).overwrite_output()
ffmpeg.run(stream)
os.remove(tmp_gif)
else:
stream = ffmpeg.input(in_gif)
stream = ffmpeg.output(stream, out_gif).overwrite_output()
ffmpeg.run(stream)
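# Example (sketch; hypothetical paths; needs the ffmpeg binary plus the
# ffmpeg-python package):
#   reduce_gif_size('timelapse.gif', out_gif='timelapse_small.gif')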
def upload_to_imgur(in_gif):
"""Uploads an image to imgur.com
Args:
in_gif (str): The file path to the image.
"""
import subprocess
pkg_name = 'imgur-uploader'
if not is_tool(pkg_name):
check_install(pkg_name)
try:
IMGUR_API_ID = os.environ.get('IMGUR_API_ID', None)
IMGUR_API_SECRET = os.environ.get('IMGUR_API_SECRET', None)
credentials_path = os.path.join(os.path.expanduser(
'~'), '.config/imgur_uploader/uploader.cfg')
if ((IMGUR_API_ID is not None) and (IMGUR_API_SECRET is not None)) or os.path.exists(credentials_path):
proc = subprocess.Popen(
['imgur-uploader', in_gif], stdout=subprocess.PIPE)
for i in range(0, 2):
line = proc.stdout.readline()
print(line.rstrip().decode("utf-8"))
# while True:
# line = proc.stdout.readline()
# if not line:
# break
# print(line.rstrip().decode("utf-8"))
else:
print('Imgur API credentials could not be found. Please check https://pypi.org/project/imgur-uploader/ for instructions on how to get Imgur API credentials')
return
except Exception as e:
print(e)
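# Example (sketch; 'timelapse.gif' is a hypothetical path): requires either
# the IMGUR_API_ID/IMGUR_API_SECRET environment variables or an existing
# ~/.config/imgur_uploader/uploader.cfg file.
#   upload_to_imgur('timelapse.gif')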
def is_tool(name):
"""Check whether `name` is on PATH and marked as executable."""
from shutil import which
return which(name) is not None
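# Example (sketch):
#   is_tool('ffmpeg')  # True only if an 'ffmpeg' executable is on PATH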
def image_props(img, date_format='YYYY-MM-dd'):
"""Gets image properties.
Args:
img (ee.Image): The input image.
        date_format (str, optional): The output date format. Defaults to 'YYYY-MM-dd'.
Returns:
        ee.Dictionary: The dictionary containing image properties.
"""
if not isinstance(img, ee.Image):
print('The input object must be an ee.Image')
return
keys = img.propertyNames().remove('system:footprint').remove('system:bands')
values = keys.map(lambda p: img.get(p))
bands = img.bandNames()
scales = bands.map(lambda b: img.select([b]).projection().nominalScale())
scale = ee.Algorithms.If(scales.distinct().size().gt(
1), ee.Dictionary.fromLists(bands.getInfo(), scales), scales.get(0))
image_date = ee.Date(img.get('system:time_start')).format(date_format)
time_start = ee.Date(img.get('system:time_start')
).format('YYYY-MM-dd HH:mm:ss')
# time_end = ee.Date(img.get('system:time_end')).format('YYYY-MM-dd HH:mm:ss')
time_end = ee.Algorithms.If(ee.List(img.propertyNames()).contains('system:time_end'), ee.Date(
img.get('system:time_end')).format('YYYY-MM-dd HH:mm:ss'), time_start)
asset_size = ee.Number(img.get('system:asset_size')).divide(
1e6).format().cat(ee.String(' MB'))
props = ee.Dictionary.fromLists(keys, values)
props = props.set('system:time_start', time_start)
props = props.set('system:time_end', time_end)
props = props.set('system:asset_size', asset_size)
props = props.set('NOMINAL_SCALE', scale)
props = props.set('IMAGE_DATE', image_date)
return props
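# Example (sketch; assumes ee.Initialize() has run; the asset id is only an
# illustration):
#   props = image_props(ee.Image('USGS/SRTMGL1_003'))
#   props.getInfo()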
def image_stats(img, region=None, scale=None):
"""Gets image descriptive statistics.
Args:
img (ee.Image): The input image to calculate descriptive statistics.
region (object, optional): The region over which to reduce data. Defaults to the footprint of the image's first band.
scale (float, optional): A nominal scale in meters of the projection to work in. Defaults to None.
Returns:
        ee.Dictionary: A dictionary containing the descriptive statistics of the input image.
"""
import geemap.utils as utils
if not isinstance(img, ee.Image):
print('The input object must be an ee.Image')
return
stat_types = ['min', 'max', 'mean', 'std', 'sum']
image_min = utils.image_min_value(img, region, scale)
image_max = utils.image_max_value(img, region, scale)
image_mean = utils.image_mean_value(img, region, scale)
image_std = utils.image_std_value(img, region, scale)
image_sum = utils.image_sum_value(img, region, scale)
stat_results = ee.List(
[image_min, image_max, image_mean, image_std, image_sum])
stats = ee.Dictionary.fromLists(stat_types, stat_results)
return stats
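# Example (sketch; 'img' and 'roi' stand in for objects defined elsewhere):
#   stats = image_stats(img, region=roi, scale=90)
#   stats.getInfo()  # keys: 'min', 'max', 'mean', 'std', 'sum'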
def date_sequence(start, end, unit, date_format='YYYY-MM-dd'):
"""Creates a date sequence.
Args:
start (str): The start date, e.g., '2000-01-01'.
end (str): The end date, e.g., '2000-12-31'.
        unit (str): One of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'.
date_format (str, optional): A pattern, as described at http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. Defaults to 'YYYY-MM-dd'.
Returns:
ee.List: A list of date sequence.
"""
start_date = ee.Date(start)
end_date = ee.Date(end)
count = ee.Number(end_date.difference(start_date, unit)).toInt()
num_seq = ee.List.sequence(0, count)
date_seq = num_seq.map(
lambda d: start_date.advance(d, unit).format(date_format))
return date_seq
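# Example (sketch): monthly dates for the docstring's example year.
#   date_sequence('2000-01-01', '2000-12-31', 'month').getInfo()
#   -> ['2000-01-01', '2000-02-01', ..., '2000-12-01']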
def adjust_longitude(in_fc):
    """Adjusts longitude if it is less than -180 or greater than 180.

    Args:
        in_fc (dict): The input dictionary containing coordinates.

    Returns:
        dict: A dictionary containing the converted longitudes.
    """
    def normalize(longitude):
        # Wrap a longitude into the [-180, 180] range.
        if longitude < -180:
            longitude = 360 + longitude
        elif longitude > 180:
            longitude = longitude - 360
        return longitude

    try:
        keys = in_fc.keys()

        # The geometry may sit under a 'geometry' key (a Feature) or be the
        # dictionary itself (a bare GeoJSON geometry).
        if 'geometry' in keys:
            geom = in_fc['geometry']
        elif 'type' in keys:
            geom = in_fc
        else:
            return in_fc

        coordinates = geom['coordinates']

        if geom['type'] == 'Point':
            geom['coordinates'][0] = normalize(coordinates[0])
        elif geom['type'] == 'Polygon':
            for index1, item in enumerate(coordinates):
                for index2, element in enumerate(item):
                    geom['coordinates'][index1][index2][0] = normalize(
                        element[0])
        elif geom['type'] == 'LineString':
            for index, element in enumerate(coordinates):
                geom['coordinates'][index][0] = normalize(element[0])

        return in_fc
    except Exception as e:
        print(e)
        return None
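# Example (sketch): a longitude outside [-180, 180] gets wrapped.
#   adjust_longitude({'type': 'Point', 'coordinates': [190.0, 30.0]})
#   -> {'type': 'Point', 'coordinates': [-170.0, 30.0]}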
def set_proxy(port=1080, ip='http://127.0.0.1'):
"""Sets proxy if needed. This is only needed for countries where Google services are not available.
Args:
port (int, optional): The proxy port number. Defaults to 1080.
ip (str, optional): The IP address. Defaults to 'http://127.0.0.1'.
"""
import os
import requests
try:
if not ip.startswith('http'):
ip = 'http://' + ip
proxy = '{}:{}'.format(ip, port)
os.environ['HTTP_PROXY'] = proxy
os.environ['HTTPS_PROXY'] = proxy
a = requests.get('https://earthengine.google.com/')
if a.status_code != 200:
print(
                'Failed to connect to Earth Engine. Please double check the port number and IP address.')
except Exception as e:
print(e)
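# Example (sketch): route Earth Engine requests through a local proxy
# listening on port 1080.
#   set_proxy(port=1080, ip='http://127.0.0.1')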
def in_colab_shell():
"""Tests if the code is being executed within Google Colab."""
try:
import google.colab # pylint: disable=unused-variable
return True
except ImportError:
return False
def is_drive_mounted():
"""Checks whether Google Drive is mounted in Google Colab.
Returns:
bool: Returns True if Google Drive is mounted, False otherwise.
"""
drive_path = '/content/drive/My Drive'
if os.path.exists(drive_path):
return True
else:
return False
def credentials_in_drive():
"""Checks if the ee credentials file exists in Google Drive.
Returns:
        bool: Returns True if the ee credentials file exists in Google Drive, False otherwise.
"""
credentials_path = '/content/drive/My Drive/.config/earthengine/credentials'
if os.path.exists(credentials_path):
return True
else:
return False
def credentials_in_colab():
"""Checks if the ee credentials file exists in Google Colab.
Returns:
        bool: Returns True if the ee credentials file exists in Google Colab, False otherwise.
"""
credentials_path = '/root/.config/earthengine/credentials'
if os.path.exists(credentials_path):
return True
else:
return False
def copy_credentials_to_drive():
"""Copies ee credentials from Google Colab to Google Drive.
"""
import shutil
src = '/root/.config/earthengine/credentials'
dst = '/content/drive/My Drive/.config/earthengine/credentials'
wd = os.path.dirname(dst)
if not os.path.exists(wd):
os.makedirs(wd)
shutil.copyfile(src, dst)
def copy_credentials_to_colab():
"""Copies ee credentials from Google Drive to Google Colab.
"""
import shutil
src = '/content/drive/My Drive/.config/earthengine/credentials'
dst = '/root/.config/earthengine/credentials'
wd = os.path.dirname(dst)
if not os.path.exists(wd):
os.makedirs(wd)
shutil.copyfile(src, dst)
def create_download_link(filename, title="Click here to download: "):
"""Downloads a file from voila. Adopted from https://github.com/voila-dashboards/voila/issues/578
Args:
filename (str): The file path to the file to download
title (str, optional): str. Defaults to "Click here to download: ".
Returns:
str: HTML download URL.
"""
import base64
from IPython.display import HTML
    with open(filename, "rb") as f:
        data = f.read()
b64 = base64.b64encode(data)
payload = b64.decode()
basename = os.path.basename(filename)
html = '<a download="{filename}" href="data:text/csv;base64,{payload}" style="color:#0000FF;" target="_blank">{title}</a>'
    html = html.format(payload=payload, title=title + basename,
                       filename=basename)
return HTML(html)
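# Example (sketch; 'results.csv' is a hypothetical path): show a download
# link inside a voila app.
#   create_download_link('results.csv', title='Click here to download: ')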
def edit_download_html(htmlWidget, filename, title="Click here to download: "):
"""Downloads a file from voila. Adopted from https://github.com/voila-dashboards/voila/issues/578#issuecomment-617668058
Args:
htmlWidget (object): The HTML widget to display the URL.
filename (str): File path to download.
title (str, optional): Download description. Defaults to "Click here to download: ".
"""
from IPython.display import HTML
import ipywidgets as widgets
import base64
    # Change widget html temporarily to a font-awesome spinner
htmlWidget.value = "<i class=\"fa fa-spinner fa-spin fa-2x fa-fw\"></i><span class=\"sr-only\">Loading...</span>"
# Process raw data
    with open(filename, "rb") as f:
        data = f.read()
b64 = base64.b64encode(data)
payload = b64.decode()
basename = os.path.basename(filename)
# Create and assign html to widget
html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
htmlWidget.value = html.format(
payload=payload, title=title+basename, filename=basename)
def load_GeoTIFF(URL):
"""Loads a Cloud Optimized GeoTIFF (COG) as an Image. Only Google Cloud Storage is supported. The URL can be one of the following formats:
Option 1: gs://pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif
Option 2: https://storage.googleapis.com/pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif
Option 3: https://storage.cloud.google.com/gcp-public-data-landsat/LC08/01/044/034/LC08_L1TP_044034_20131228_20170307_01_T1/LC08_L1TP_044034_20131228_20170307_01_T1_B5.TIF
Args:
URL (str): The Cloud Storage URL of the GeoTIFF to load.
Returns:
ee.Image: an Earth Engine image.
"""
uri = URL.strip()
if uri.startswith('https://storage.googleapis.com/'):
uri = uri.replace('https://storage.googleapis.com/', 'gs://')
elif uri.startswith('https://storage.cloud.google.com/'):
uri = uri.replace('https://storage.cloud.google.com/', 'gs://')
if not uri.startswith('gs://'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
if not uri.lower().endswith('.tif'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
cloud_image = ee.Image.loadGeoTIFF(uri)
return cloud_image
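# Example (sketch): the Option 1 URL from the docstring loads directly.
#   img = load_GeoTIFF('gs://pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif')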
def load_GeoTIFFs(URLs):
"""Loads a list of Cloud Optimized GeoTIFFs (COG) as an ImageCollection. URLs is a list of URL, which can be one of the following formats:
Option 1: gs://pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif
Option 2: https://storage.googleapis.com/pdd-stac/disasters/hurricane-harvey/0831/20170831_172754_101c_3B_AnalyticMS.tif
Option 3: https://storage.cloud.google.com/gcp-public-data-landsat/LC08/01/044/034/LC08_L1TP_044034_20131228_20170307_01_T1/LC08_L1TP_044034_20131228_20170307_01_T1_B5.TIF
Args:
URLs (list): A list of Cloud Storage URL of the GeoTIFF to load.
Returns:
ee.ImageCollection: An Earth Engine ImageCollection.
"""
if not isinstance(URLs, list):
raise Exception('The URLs argument must be a list.')
URIs = []
for URL in URLs:
uri = URL.strip()
if uri.startswith('https://storage.googleapis.com/'):
uri = uri.replace('https://storage.googleapis.com/', 'gs://')
elif uri.startswith('https://storage.cloud.google.com/'):
uri = uri.replace('https://storage.cloud.google.com/', 'gs://')
if not uri.startswith('gs://'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
if not uri.lower().endswith('.tif'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
URIs.append(uri)
URIs = ee.List(URIs)
collection = URIs.map(lambda uri: ee.Image.loadGeoTIFF(uri))
return ee.ImageCollection(collection)
def landsat_ts_norm_diff(collection, bands=['Green', 'SWIR1'], threshold=0):
"""Computes a normalized difference index based on a Landsat timeseries.
Args:
collection (ee.ImageCollection): A Landsat timeseries.
bands (list, optional): The bands to use for computing normalized difference. Defaults to ['Green', 'SWIR1'].
threshold (float, optional): The threshold to extract features. Defaults to 0.
Returns:
ee.ImageCollection: An ImageCollection containing images with values greater than the specified threshold.
"""
nd_images = collection.map(lambda img: img.normalizedDifference(
bands).gt(threshold).copyProperties(img, img.propertyNames()))
return nd_images
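# Example (sketch): with the default Green/SWIR1 bands the index behaves like
# MNDWI, so pixels above the threshold flag surface water; 'collection'
# stands in for a Landsat timeseries built elsewhere.
#   nd_images = landsat_ts_norm_diff(collection, threshold=0)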
def landsat_ts_norm_diff_gif(collection, out_gif=None, vis_params=None, palette=['black', 'blue'], dimensions=768, frames_per_second=10):
"""[summary]
Args:
        collection (ee.ImageCollection): The normalized difference Landsat timeseries.
out_gif (str, optional): File path to the output animated GIF. Defaults to None.
vis_params (dict, optional): Visualization parameters. Defaults to None.
palette (list, optional): The palette to use for visualizing the timelapse. Defaults to ['black', 'blue']. The first color in the list is the background color.
        dimensions (int, optional): A number or pair of numbers (in format WIDTHxHEIGHT) specifying the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.
frames_per_second (int, optional): Animation speed. Defaults to 10.
Returns:
str: File path to the output animated GIF.
"""
coordinates = ee.Image(collection.first()).get('coordinates')
roi = ee.Geometry.Polygon(coordinates, None, False)
if out_gif is None:
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = 'landsat_ts_nd_' + random_string() + '.gif'
out_gif = os.path.join(out_dir, filename)
elif not out_gif.endswith('.gif'):
raise Exception('The output file must end with .gif')
bands = ['nd']
if vis_params is None:
vis_params = {}
vis_params['bands'] = bands
vis_params['palette'] = palette
video_args = vis_params.copy()
video_args['dimensions'] = dimensions
video_args['region'] = roi
video_args['framesPerSecond'] = frames_per_second
video_args['crs'] = 'EPSG:3857'
if 'bands' not in video_args.keys():
video_args['bands'] = bands
download_ee_video(collection, video_args, out_gif)
return out_gif
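# Example (sketch): render the thresholded timeseries produced above as an
# animated GIF saved to the Downloads folder by default.
#   out_gif = landsat_ts_norm_diff_gif(nd_images, dimensions=768,
#                                      frames_per_second=10)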
import colour
import ee
import geocoder
import ipyleaflet
import math
import os
import time
import ipywidgets as widgets
from bqplot import pyplot as plt
from ipyfilechooser import FileChooser
from ipyleaflet import *
from ipytree import Tree, Node
from IPython.display import display
from .basemaps import ee_basemaps
from .conversion import *
from .legends import builtin_legends
def ee_initialize(token_name='EARTHENGINE_TOKEN'):
try:
ee_token = os.environ.get(token_name)
if ee_token is not None:
credential = '{"refresh_token":"%s"}' % ee_token
credential_file_path = os.path.expanduser("~/.config/earthengine/")
os.makedirs(credential_file_path, exist_ok=True)
with open(credential_file_path + 'credentials', 'w') as file:
file.write(credential)
elif in_colab_shell():
if credentials_in_drive() and (not credentials_in_colab()):
copy_credentials_to_colab()
            elif not credentials_in_colab():
ee.Authenticate()
if is_drive_mounted() and (not credentials_in_drive()):
copy_credentials_to_drive()
else:
if is_drive_mounted():
copy_credentials_to_drive()
ee.Initialize()
    except Exception:
ee.Authenticate()
ee.Initialize()
class Map(ipyleaflet.Map):
def __init__(self, **kwargs):
ee_initialize()
latlon = [40, -100]
zoom = 4
if 'location' in kwargs.keys():
kwargs['center'] = kwargs['location']
kwargs.pop('location')
if 'center' in kwargs.keys():
latlon = kwargs['center']
else:
kwargs['center'] = latlon
if 'zoom_start' in kwargs.keys():
kwargs['zoom'] = kwargs['zoom_start']
kwargs.pop('zoom_start')
if 'zoom' in kwargs.keys():
zoom = kwargs['zoom']
else:
kwargs['zoom'] = zoom
if 'add_google_map' not in kwargs.keys():
kwargs['add_google_map'] = True
if 'show_attribution' not in kwargs.keys():
kwargs['show_attribution'] = True
if 'scroll_wheel_zoom' not in kwargs.keys():
kwargs['scroll_wheel_zoom'] = True
if 'zoom_control' not in kwargs.keys():
kwargs['zoom_control'] = True
if 'height' not in kwargs.keys():
kwargs['height'] = '550px'
super().__init__(**kwargs)
self.layout.height = kwargs['height']
self.clear_controls()
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.roi_start = False
self.roi_end = False
self.roi_reducer = ee.Reducer.mean()
self.roi_reducer_scale = None
self.chart_points = []
self.chart_values = []
self.chart_labels = None
self.plot_widget = None
self.plot_control = None
self.random_marker = None
self.legend_widget = None
self.legend_control = None
self.ee_layers = []
self.ee_layer_names = []
self.ee_raster_layers = []
self.ee_raster_layer_names = []
self.ee_layer_dict = {}
self.search_locations = None
self.search_loc_marker = None
self.search_loc_geom = None
self.search_datasets = None
self.screenshot = None
self.toolbar = None
self.toolbar_button = None
search_button = widgets.ToggleButton(
value=False,
tooltip='Search location/data',
icon='globe'
)
search_button.layout.width = '36px'
search_type = widgets.ToggleButtons(
options=['name/address', 'lat-lon', 'data'],
tooltips=['Search by place name or address',
'Search by lat-lon coordinates', 'Search Earth Engine data catalog']
)
search_type.style.button_width = '110px'
search_box = widgets.Text(
placeholder='Search by place name or address',
tooltip='Search location',
)
search_box.layout.width = '340px'
search_output = widgets.Output(
layout={'max_width': '340px', 'max_height': '250px', 'overflow': 'scroll'})
search_results = widgets.RadioButtons()
assets_dropdown = widgets.Dropdown()
assets_dropdown.layout.min_width = '279px'
assets_dropdown.layout.max_width = '279px'
assets_dropdown.options = []
import_btn = widgets.Button(
description='import',
button_style='primary',
tooltip='Click to import the selected asset',
)
import_btn.layout.min_width = '57px'
import_btn.layout.max_width = '57px'
def import_btn_clicked(b):
if assets_dropdown.value != '':
datasets = self.search_datasets
dataset = datasets[assets_dropdown.index]
dataset_uid = 'dataset_' + random_string(string_length=3)
line1 = '{} = {}\n'.format(
dataset_uid, dataset['ee_id_snippet'])
line2 = 'Map.addLayer(' + dataset_uid + \
', {}, "' + dataset['id'] + '")'
contents = ''.join([line1, line2])
create_code_cell(contents)
import_btn.on_click(import_btn_clicked)
html_widget = widgets.HTML()
def dropdown_change(change):
dropdown_index = assets_dropdown.index
if dropdown_index is not None and dropdown_index >= 0:
with search_output:
search_output.clear_output(wait=True)
print('Loading ...')
datasets = self.search_datasets
dataset = datasets[dropdown_index]
dataset_html = ee_data_html(dataset)
html_widget.value = dataset_html
search_output.clear_output(wait=True)
display(html_widget)
assets_dropdown.observe(dropdown_change, names='value')
assets_combo = widgets.HBox()
assets_combo.children = [import_btn, assets_dropdown]
def search_result_change(change):
result_index = search_results.index
locations = self.search_locations
location = locations[result_index]
latlon = (location.lat, location.lng)
self.search_loc_geom = ee.Geometry.Point(
location.lng, location.lat)
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.observe(search_result_change, names='value')
def search_btn_click(change):
if change['new']:
search_widget.children = [search_button, search_result_widget]
else:
search_widget.children = [search_button]
search_result_widget.children = [search_type, search_box]
search_button.observe(search_btn_click, 'value')
def search_type_changed(change):
search_box.value = ''
search_output.clear_output()
if change['new'] == 'name/address':
search_box.placeholder = 'Search by place name or address, e.g., Paris'
assets_dropdown.options = []
search_result_widget.children = [
search_type, search_box, search_output]
elif change['new'] == 'lat-lon':
search_box.placeholder = 'Search by lat-lon, e.g., 40, -100'
assets_dropdown.options = []
search_result_widget.children = [
search_type, search_box, search_output]
elif change['new'] == 'data':
search_box.placeholder = 'Search GEE data catalog by keywords, e.g., elevation'
search_result_widget.children = [
search_type, search_box, assets_combo, search_output]
search_type.observe(search_type_changed, names='value')
def search_box_callback(text):
if text.value != '':
if search_type.value == 'name/address':
g = geocode(text.value)
elif search_type.value == 'lat-lon':
g = geocode(text.value, reverse=True)
if g is None and latlon_from_text(text.value):
search_output.clear_output()
latlon = latlon_from_text(text.value)
self.search_loc_geom = ee.Geometry.Point(
latlon[1], latlon[0])
if self.search_loc_marker is None:
marker = Marker(
location=latlon, draggable=False, name='Search location')
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
with search_output:
print('No address found for {}'.format(latlon))
return
elif search_type.value == 'data':
search_output.clear_output()
with search_output:
print('Searching ...')
self.default_style = {'cursor': 'wait'}
ee_assets = search_ee_data(text.value)
self.search_datasets = ee_assets
asset_titles = [x['title'] for x in ee_assets]
assets_dropdown.options = asset_titles
search_output.clear_output()
if len(ee_assets) > 0:
html_widget.value = ee_data_html(ee_assets[0])
with search_output:
display(html_widget)
self.default_style = {'cursor': 'default'}
return
self.search_locations = g
if g is not None and len(g) > 0:
top_loc = g[0]
latlon = (top_loc.lat, top_loc.lng)
self.search_loc_geom = ee.Geometry.Point(
top_loc.lng, top_loc.lat)
if self.search_loc_marker is None:
marker = Marker(
location=latlon, draggable=False, name='Search location')
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.options = [x.address for x in g]
search_result_widget.children = [
search_type, search_box, search_output]
with search_output:
search_output.clear_output(wait=True)
display(search_results)
else:
with search_output:
search_output.clear_output()
print('No results could be found.')
search_box.on_submit(search_box_callback)
search_result_widget = widgets.VBox()
search_result_widget.children = [search_type, search_box]
search_widget = widgets.HBox()
search_widget.children = [search_button]
data_control = WidgetControl(
widget=search_widget, position='topleft')
self.add_control(control=data_control)
search_marker = Marker(icon=AwesomeIcon(
name="check", marker_color='green', icon_color='darkgreen'))
search = SearchControl(position="topleft",
url='https://nominatim.openstreetmap.org/search?format=json&q={s}',
zoom=5,
property_name='display_name',
marker=search_marker
)
self.add_control(search)
if kwargs['zoom_control']:
self.add_control(ZoomControl(position='topleft'))
layer_control = LayersControl(position='topright')
self.add_control(layer_control)
self.layer_control = layer_control
scale = ScaleControl(position='bottomleft')
self.add_control(scale)
self.scale_control = scale
fullscreen = FullScreenControl()
self.add_control(fullscreen)
self.fullscreen_control = fullscreen
measure = MeasureControl(
position='bottomleft',
active_color='orange',
primary_length_unit='kilometers'
)
self.add_control(measure)
self.measure_control = measure
if kwargs.get('add_google_map'):
self.add_layer(ee_basemaps['ROADMAP'])
if kwargs.get('show_attribution'):
self.add_control(AttributionControl(position='bottomright'))
draw_control = DrawControl(marker={'shapeOptions': {'color': '#0000FF'}},
rectangle={'shapeOptions': {
'color': '#0000FF'}},
circle={'shapeOptions': {
'color': '#0000FF'}},
circlemarker={},
)
draw_control_lite = DrawControl(marker={},
rectangle={'shapeOptions': {
'color': '#0000FF'}},
circle={'shapeOptions': {
'color': '#0000FF'}},
circlemarker={},
polyline={},
polygon={}
)
def handle_draw(target, action, geo_json):
try:
self.roi_start = True
self.draw_count += 1
geom = geojson_to_ee(geo_json, False)
self.user_roi = geom
feature = ee.Feature(geom)
self.draw_last_json = geo_json
self.draw_last_bounds = minimum_bounding_box(geo_json)
self.draw_last_feature = feature
self.draw_features.append(feature)
collection = ee.FeatureCollection(self.draw_features)
self.user_rois = collection
ee_draw_layer = ee_tile_layer(
collection, {'color': 'blue'}, 'Drawn Features', True, 0.5)
if self.draw_count == 1:
self.add_layer(ee_draw_layer)
self.draw_layer = ee_draw_layer
else:
self.substitute_layer(self.draw_layer, ee_draw_layer)
self.draw_layer = ee_draw_layer
draw_control.clear()
self.roi_end = True
self.roi_start = False
except Exception as e:
print(e)
print("There was an error creating Earth Engine Feature.")
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.user_roi = None
self.roi_start = False
self.roi_end = False
draw_control.on_draw(handle_draw)
self.add_control(draw_control)
self.draw_control = draw_control
self.draw_control_lite = draw_control_lite
self.plot_dropdown_control = None
self.plot_dropdown_widget = None
self.plot_options = {}
self.plot_marker_cluster = MarkerCluster(name="Marker Cluster")
self.plot_coordinates = []
self.plot_markers = []
self.plot_last_click = []
self.plot_all_clicks = []
inspector_checkbox = widgets.Checkbox(
value=False,
description='Inspector',
indent=False,
layout=widgets.Layout(height='18px')
)
inspector_checkbox.layout.width = '13ex'
plot_checkbox = widgets.Checkbox(
value=False,
description='Plotting',
indent=False,
)
plot_checkbox.layout.width = '13ex'
self.plot_checkbox = plot_checkbox
vb = widgets.VBox(children=[inspector_checkbox, plot_checkbox])
chk_control = WidgetControl(widget=vb, position='topright')
self.add_control(chk_control)
self.inspector_control = chk_control
self.inspector_checked = inspector_checkbox.value
self.plot_checked = plot_checkbox.value
def inspect_chk_changed(b):
self.inspector_checked = inspector_checkbox.value
if not self.inspector_checked:
output.clear_output()
inspector_checkbox.observe(inspect_chk_changed)
output = widgets.Output(layout={'border': '1px solid black'})
output_control = WidgetControl(widget=output, position='topright')
self.add_control(output_control)
def plot_chk_changed(button):
if button['name'] == 'value' and button['new']:
self.plot_checked = True
plot_dropdown_widget = widgets.Dropdown(
options=list(self.ee_raster_layer_names),
)
plot_dropdown_widget.layout.width = '18ex'
self.plot_dropdown_widget = plot_dropdown_widget
plot_dropdown_control = WidgetControl(
widget=plot_dropdown_widget, position='topright')
self.plot_dropdown_control = plot_dropdown_control
self.add_control(plot_dropdown_control)
self.remove_control(self.draw_control)
self.add_control(self.draw_control_lite)
elif button['name'] == 'value' and (not button['new']):
self.plot_checked = False
plot_dropdown_widget = self.plot_dropdown_widget
plot_dropdown_control = self.plot_dropdown_control
self.remove_control(plot_dropdown_control)
del plot_dropdown_widget
del plot_dropdown_control
if self.plot_control in self.controls:
plot_control = self.plot_control
plot_widget = self.plot_widget
self.remove_control(plot_control)
self.plot_control = None
self.plot_widget = None
del plot_control
del plot_widget
if self.plot_marker_cluster is not None and self.plot_marker_cluster in self.layers:
self.remove_layer(self.plot_marker_cluster)
self.remove_control(self.draw_control_lite)
self.add_control(self.draw_control)
plot_checkbox.observe(plot_chk_changed)
tool_output = widgets.Output()
tool_output.clear_output(wait=True)
save_map_widget = widgets.VBox()
save_type = widgets.ToggleButtons(
options=['HTML', 'PNG', 'JPG'],
tooltips=['Save the map as an HTML file',
'Take a screenshot and save as a PNG file',
'Take a screenshot and save as a JPG file']
)
file_chooser = FileChooser(os.getcwd())
file_chooser.default_filename = 'my_map.html'
file_chooser.use_dir_icons = False
ok_cancel = widgets.ToggleButtons(
options=['OK', 'Cancel'],
tooltips=['OK', 'Cancel'],
button_style='primary'
)
ok_cancel.value = None
def save_type_changed(change):
ok_cancel.value = None
file_chooser.default_path = os.getcwd()
if change['new'] == 'HTML':
file_chooser.default_filename = 'my_map.html'
elif change['new'] == 'PNG':
file_chooser.default_filename = 'my_map.png'
elif change['new'] == 'JPG':
file_chooser.default_filename = 'my_map.jpg'
save_map_widget.children = [save_type, file_chooser]
def chooser_callback(chooser):
save_map_widget.children = [save_type, file_chooser, ok_cancel]
def ok_cancel_clicked(change):
if change['new'] == 'OK':
file_path = file_chooser.selected
ext = os.path.splitext(file_path)[1]
if save_type.value == 'HTML' and ext.upper() == '.HTML':
tool_output.clear_output()
self.to_html(file_path)
elif save_type.value == 'PNG' and ext.upper() == '.PNG':
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
elif save_type.value == 'JPG' and ext.upper() == '.JPG':
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
else:
label = widgets.Label(
value="The selected file extension does not match the selected exporting type.")
save_map_widget.children = [save_type, file_chooser, label]
self.toolbar_reset()
elif change['new'] == 'Cancel':
tool_output.clear_output()
self.toolbar_reset()
save_type.observe(save_type_changed, names='value')
ok_cancel.observe(ok_cancel_clicked, names='value')
file_chooser.register_callback(chooser_callback)
save_map_widget.children = [save_type, file_chooser]
tools = {
'mouse-pointer': 'pointer',
'camera': 'to_image',
'info': 'identify',
'map-marker': 'plotting'
}
icons = ['mouse-pointer', 'camera', 'info', 'map-marker']
tooltips = ['Default pointer',
'Save map as HTML or image', 'Inspector', 'Plotting']
icon_width = '42px'
icon_height = '40px'
n_cols = 2
n_rows = math.ceil(len(icons) / n_cols)
toolbar_grid = widgets.GridBox(children=[widgets.ToggleButton(layout=widgets.Layout(width='auto', height='auto'),
button_style='primary', icon=icons[i], tooltip=tooltips[i]) for i in range(len(icons))],
layout=widgets.Layout(
width='90px',
grid_template_columns=(icon_width + ' ') * 2,
grid_template_rows=(icon_height + ' ') * n_rows,
grid_gap='1px 1px')
)
self.toolbar = toolbar_grid
def tool_callback(change):
if change['new']:
current_tool = change['owner']
for tool in toolbar_grid.children:
                    if tool is not current_tool:
tool.value = False
tool = change['owner']
if tools[tool.icon] == 'to_image':
with tool_output:
tool_output.clear_output()
display(save_map_widget)
else:
tool_output.clear_output()
save_map_widget.children = [save_type, file_chooser]
for tool in toolbar_grid.children:
tool.observe(tool_callback, 'value')
toolbar_button = widgets.ToggleButton(
value=False,
tooltip='Toolbar',
icon='wrench'
)
toolbar_button.layout.width = '37px'
self.toolbar_button = toolbar_button
def toolbar_btn_click(change):
if change['new']:
toolbar_widget.children = [toolbar_button, toolbar_grid]
else:
toolbar_widget.children = [toolbar_button]
tool_output.clear_output()
self.toolbar_reset()
toolbar_button.observe(toolbar_btn_click, 'value')
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_control = WidgetControl(
widget=toolbar_widget, position='topright')
self.add_control(toolbar_control)
tool_output_control = WidgetControl(
widget=tool_output, position='topright')
self.add_control(tool_output_control)
def handle_interaction(**kwargs):
latlon = kwargs.get('coordinates')
if kwargs.get('type') == 'click' and self.inspector_checked:
self.default_style = {'cursor': 'wait'}
sample_scale = self.getScale()
layers = self.ee_layers
with output:
output.clear_output(wait=True)
for index, ee_object in enumerate(layers):
xy = ee.Geometry.Point(latlon[::-1])
layer_names = self.ee_layer_names
layer_name = layer_names[index]
object_type = ee_object.__class__.__name__
try:
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) \
or isinstance(ee_object, ee.featurecollection.FeatureCollection):
ee_object = ee.FeatureCollection(ee_object)
if isinstance(ee_object, ee.Image):
item = ee_object.reduceRegion(
ee.Reducer.first(), xy, sample_scale).getInfo()
b_name = 'band'
if len(item) > 1:
b_name = 'bands'
print("{}: {} ({} {})".format(
layer_name, object_type, len(item), b_name))
keys = item.keys()
for key in keys:
print(" {}: {}".format(key, item[key]))
elif isinstance(ee_object, ee.FeatureCollection):
filtered = ee_object.filterBounds(xy)
size = filtered.size().getInfo()
if size > 0:
first = filtered.first()
props = first.toDictionary().getInfo()
b_name = 'property'
if len(props) > 1:
b_name = 'properties'
print("{}: Feature ({} {})".format(
layer_name, len(props), b_name))
keys = props.keys()
for key in keys:
print(" {}: {}".format(
key, props[key]))
except Exception as e:
print(e)
self.default_style = {'cursor': 'crosshair'}
if kwargs.get('type') == 'click' and self.plot_checked and len(self.ee_raster_layers) > 0:
plot_layer_name = self.plot_dropdown_widget.value
layer_names = self.ee_raster_layer_names
layers = self.ee_raster_layers
index = layer_names.index(plot_layer_name)
ee_object = layers[index]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
try:
self.default_style = {'cursor': 'wait'}
plot_options = self.plot_options
sample_scale = self.getScale()
                    if 'sample_scale' in plot_options.keys() and (plot_options['sample_scale'] is not None):
sample_scale = plot_options['sample_scale']
if 'title' not in plot_options.keys():
plot_options['title'] = plot_layer_name
if ('add_marker_cluster' in plot_options.keys()) and plot_options['add_marker_cluster']:
plot_coordinates = self.plot_coordinates
markers = self.plot_markers
marker_cluster = self.plot_marker_cluster
plot_coordinates.append(latlon)
self.plot_last_click = latlon
self.plot_all_clicks = plot_coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
self.plot_marker_cluster = marker_cluster
band_names = ee_object.bandNames().getInfo()
self.chart_labels = band_names
if self.roi_end:
if self.roi_reducer_scale is None:
scale = ee_object.select(
0).projection().nominalScale()
else:
scale = self.roi_reducer_scale
dict_values = ee_object.reduceRegion(
reducer=self.roi_reducer, geometry=self.user_roi, scale=scale, bestEffort=True).getInfo()
self.chart_points.append(
self.user_roi.centroid(1).coordinates().getInfo())
else:
xy = ee.Geometry.Point(latlon[::-1])
dict_values = ee_object.sample(
xy, scale=sample_scale).first().toDictionary().getInfo()
self.chart_points.append(xy.coordinates().getInfo())
band_values = list(dict_values.values())
self.chart_values.append(band_values)
self.plot(band_names, band_values, **plot_options)
if plot_options['title'] == plot_layer_name:
del plot_options['title']
self.default_style = {'cursor': 'crosshair'}
self.roi_end = False
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {'cursor': 'crosshair'}
self.roi_end = False
self.on_interaction(handle_interaction)
def set_options(self, mapTypeId='HYBRID', styles=None, types=None):
self.clear_layers()
self.clear_controls()
self.scroll_wheel_zoom = True
self.add_control(ZoomControl(position='topleft'))
self.add_control(LayersControl(position='topright'))
self.add_control(ScaleControl(position='bottomleft'))
self.add_control(FullScreenControl())
self.add_control(DrawControl())
measure = MeasureControl(
position='bottomleft',
active_color='orange',
primary_length_unit='kilometers'
)
self.add_control(measure)
try:
self.add_layer(ee_basemaps[mapTypeId])
except Exception as e:
print(e)
print(
'Google basemaps can only be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN".')
setOptions = set_options
def add_ee_layer(self, ee_object, vis_params={}, name=None, shown=True, opacity=1.0):
image = None
if name is None:
layer_count = len(self.layers)
name = 'Layer ' + str(layer_count + 1)
if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):
            err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.ImageCollection, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
features = ee.FeatureCollection(ee_object)
width = 2
if 'width' in vis_params:
width = vis_params['width']
color = '000000'
if 'color' in vis_params:
color = vis_params['color']
image_fill = features.style(
**{'fillColor': color}).updateMask(ee.Image.constant(0.5))
image_outline = features.style(
**{'color': color, 'fillColor': '00000000', 'width': width})
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = ipyleaflet.TileLayer(
url=map_id_dict['tile_fetcher'].url_format,
attribution='Google Earth Engine',
name=name,
opacity=opacity,
            visible=shown
)
layer = self.find_layer(name=name)
if layer is not None:
existing_object = self.ee_layer_dict[name]['ee_object']
if isinstance(existing_object, ee.Image) or isinstance(existing_object, ee.ImageCollection):
self.ee_raster_layers.remove(existing_object)
self.ee_raster_layer_names.remove(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(
self.ee_raster_layer_names)
self.ee_layers.remove(existing_object)
self.ee_layer_names.remove(name)
self.remove_layer(layer)
self.ee_layers.append(ee_object)
self.ee_layer_names.append(name)
self.ee_layer_dict[name] = {
'ee_object': ee_object, 'ee_layer': tile_layer}
self.add_layer(tile_layer)
if isinstance(ee_object, ee.Image) or isinstance(ee_object, ee.ImageCollection):
self.ee_raster_layers.append(ee_object)
self.ee_raster_layer_names.append(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(
self.ee_raster_layer_names)
addLayer = add_ee_layer
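    # Usage sketch (hypothetical asset and vis values; assumes ee.Initialize()
    # has already been called on an authenticated session):
    #   m = Map(center=(40, -100), zoom=4)
    #   dem = ee.Image('USGS/SRTMGL1_003')
    #   m.add_ee_layer(dem, {'min': 0, 'max': 4000}, 'DEM')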
def set_center(self, lon, lat, zoom=None):
self.center = (lat, lon)
if zoom is not None:
self.zoom = zoom
setCenter = set_center
def center_object(self, ee_object, zoom=None):
lat = 0
lon = 0
bounds = [[lat, lon], [lat, lon]]
if isinstance(ee_object, ee.geometry.Geometry):
centroid = ee_object.centroid(1)
lon, lat = centroid.getInfo()['coordinates']
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.feature.Feature):
centroid = ee_object.geometry().centroid(1)
lon, lat = centroid.getInfo()['coordinates']
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.featurecollection.FeatureCollection):
centroid = ee_object.geometry().centroid()
lon, lat = centroid.getInfo()['coordinates']
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.image.Image):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()['coordinates'][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()['coordinates'][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
else:
bounds = [[0, 0], [0, 0]]
lat = bounds[0][0]
lon = bounds[0][1]
self.setCenter(lon, lat, zoom)
centerObject = center_object
def get_scale(self):
zoom_level = self.zoom
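        # Approximate Web Mercator ground resolution in meters per pixel:
        # 156543.04 * cos(latitude) / 2**zoom, evaluated here at the equator (latitude = 0).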
resolution = 156543.04 * math.cos(0) / math.pow(2, zoom_level)
return resolution
getScale = get_scale
def add_basemap(self, basemap='HYBRID'):
try:
self.add_layer(ee_basemaps[basemap])
except Exception as e:
print(e)
print('Basemap can only be one of the following:\n {}'.format(
'\n '.join(ee_basemaps.keys())))
def find_layer(self, name):
layers = self.layers
for layer in layers:
if layer.name == name:
return layer
return None
def layer_opacity(self, name, value=1.0):
layer = self.find_layer(name)
try:
            layer.opacity = value
        except Exception as e:
            print(e)
def add_wms_layer(self, url, layers, name=None, attribution='', format='image/jpeg', transparent=False, opacity=1.0, shown=True):
if name is None:
name = str(layers)
try:
wms_layer = ipyleaflet.WMSLayer(
url=url,
layers=layers,
name=name,
attribution=attribution,
format=format,
transparent=transparent,
opacity=opacity,
                visible=shown
)
self.add_layer(wms_layer)
except Exception as e:
print(e)
print("Failed to add the specified WMS TileLayer.")
def add_tile_layer(self, url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', name=None, attribution='', opacity=1.0, shown=True):
try:
tile_layer = ipyleaflet.TileLayer(
url=url,
name=name,
attribution=attribution,
opacity=opacity,
                visible=shown
)
self.add_layer(tile_layer)
except Exception as e:
print(e)
print("Failed to add the specified TileLayer.")
def add_minimap(self, zoom=5, position="bottomright"):
minimap = ipyleaflet.Map(
zoom_control=False, attribution_control=False,
            zoom=zoom, center=self.center, layers=[ee_basemaps['ROADMAP']]
)
minimap.layout.width = '150px'
minimap.layout.height = '150px'
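        # A two-way traitlet link keeps the inset map centered on the main map.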
link((minimap, 'center'), (self, 'center'))
minimap_control = WidgetControl(widget=minimap, position=position)
self.add_control(minimap_control)
def marker_cluster(self):
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.ee_markers = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get('coordinates')
if kwargs.get('type') == 'click':
coordinates.append(latlon)
geom = ee.Geometry.Point(latlon[1], latlon[0])
feature = ee.Feature(geom)
self.ee_markers.append(feature)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get('type') == 'mousemove':
pass
self.default_style = {'cursor': 'crosshair'}
self.on_interaction(handle_interaction)
def set_plot_options(self, add_marker_cluster=False, sample_scale=None, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
plot_options_dict = {}
plot_options_dict['add_marker_cluster'] = add_marker_cluster
plot_options_dict['sample_scale'] = sample_scale
plot_options_dict['plot_type'] = plot_type
plot_options_dict['overlay'] = overlay
plot_options_dict['position'] = position
plot_options_dict['min_width'] = min_width
plot_options_dict['max_width'] = max_width
plot_options_dict['min_height'] = min_height
plot_options_dict['max_height'] = max_height
for key in kwargs.keys():
plot_options_dict[key] = kwargs[key]
self.plot_options = plot_options_dict
if add_marker_cluster and (self.plot_marker_cluster not in self.layers):
self.add_layer(self.plot_marker_cluster)
def plot(self, x, y, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
if self.plot_widget is not None:
plot_widget = self.plot_widget
else:
plot_widget = widgets.Output(layout={'border': '1px solid black'})
plot_control = WidgetControl(widget=plot_widget, position=position, min_width=min_width,
max_width=max_width, min_height=min_height, max_height=max_height)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
if max_width is None:
max_width = 500
if max_height is None:
max_height = 300
if (plot_type is None) and ('markers' not in kwargs.keys()):
kwargs['markers'] = 'circle'
with plot_widget:
try:
fig = plt.figure(1, **kwargs)
if max_width is not None:
fig.layout.width = str(max_width) + 'px'
if max_height is not None:
fig.layout.height = str(max_height) + 'px'
plot_widget.clear_output(wait=True)
if not overlay:
plt.clear()
if plot_type is None:
if 'marker' not in kwargs.keys():
kwargs['marker'] = 'circle'
plt.plot(x, y, **kwargs)
elif plot_type == 'bar':
plt.bar(x, y, **kwargs)
elif plot_type == 'scatter':
plt.scatter(x, y, **kwargs)
elif plot_type == 'hist':
plt.hist(y, **kwargs)
plt.show()
except Exception as e:
print(e)
print("Failed to create plot.")
def plot_demo(self, iterations=20, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
import numpy as np
import time
if self.random_marker is not None:
self.remove_layer(self.random_marker)
image = ee.Image('LE7_TOA_5YEAR/1999_2003').select([0, 1, 2, 3, 4, 6])
self.addLayer(
image, {'bands': ['B4', 'B3', 'B2'], 'gamma': 1.4}, "LE7_TOA_5YEAR/1999_2003")
self.setCenter(-50.078877, 25.190030, 3)
band_names = image.bandNames().getInfo()
band_count = len(band_names)
latitudes = np.random.uniform(30, 48, size=iterations)
longitudes = np.random.uniform(-121, -76, size=iterations)
marker = Marker(location=(0, 0))
self.random_marker = marker
self.add_layer(marker)
for i in range(iterations):
try:
coordinate = ee.Geometry.Point([longitudes[i], latitudes[i]])
dict_values = image.sample(
coordinate).first().toDictionary().getInfo()
band_values = list(dict_values.values())
title = '{}/{}: Spectral signature at ({}, {})'.format(i+1, iterations,
round(latitudes[i], 2), round(longitudes[i], 2))
marker.location = (latitudes[i], longitudes[i])
self.plot(band_names, band_values, plot_type=plot_type, overlay=overlay,
min_width=min_width, max_width=max_width, min_height=min_height, max_height=max_height, title=title, **kwargs)
time.sleep(0.3)
except Exception as e:
print(e)
def plot_raster(self, ee_object=None, sample_scale=None, plot_type=None, overlay=False, position='bottomright', min_width=None, max_width=None, min_height=None, max_height=None, **kwargs):
if self.plot_control is not None:
del self.plot_widget
self.remove_control(self.plot_control)
if self.random_marker is not None:
self.remove_layer(self.random_marker)
plot_widget = widgets.Output(layout={'border': '1px solid black'})
plot_control = WidgetControl(widget=plot_widget, position=position, min_width=min_width,
max_width=max_width, min_height=min_height, max_height=max_height)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
self.default_style = {'cursor': 'crosshair'}
msg = "The plot function can only be used on ee.Image or ee.ImageCollection with more than one band."
if (ee_object is None) and len(self.ee_raster_layers) > 0:
ee_object = self.ee_raster_layers[-1]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif not isinstance(ee_object, ee.Image):
print(msg)
return
if sample_scale is None:
sample_scale = self.getScale()
if max_width is None:
max_width = 500
band_names = ee_object.bandNames().getInfo()
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs2):
latlon = kwargs2.get('coordinates')
if kwargs2.get('type') == 'click':
try:
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
self.default_style = {'cursor': 'wait'}
xy = ee.Geometry.Point(latlon[::-1])
dict_values = ee_object.sample(
xy, scale=sample_scale).first().toDictionary().getInfo()
band_values = list(dict_values.values())
self.plot(band_names, band_values, plot_type=plot_type, overlay=overlay,
min_width=min_width, max_width=max_width, min_height=min_height, max_height=max_height, **kwargs)
self.default_style = {'cursor': 'crosshair'}
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {'cursor': 'crosshair'}
self.on_interaction(handle_interaction)
def add_maker_cluster(self, event='click', add_marker=True):
coordinates = []
markers = []
marker_cluster = MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
if add_marker:
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get('coordinates')
if event == 'click' and kwargs.get('type') == 'click':
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
if add_marker:
markers.append(Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get('type') == 'mousemove':
pass
self.default_style = {'cursor': 'crosshair'}
self.on_interaction(handle_interaction)
def set_control_visibility(self, layerControl=True, fullscreenControl=True, latLngPopup=True):
pass
setControlVisibility = set_control_visibility
def add_layer_control(self):
pass
addLayerControl = add_layer_control
def split_map(self, left_layer='HYBRID', right_layer='ESRI'):
try:
self.remove_control(self.layer_control)
self.remove_control(self.inspector_control)
if left_layer in ee_basemaps.keys():
left_layer = ee_basemaps[left_layer]
if right_layer in ee_basemaps.keys():
right_layer = ee_basemaps[right_layer]
control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer)
self.add_control(control)
except Exception as e:
print(e)
print('The provided layers are invalid!')
def ts_inspector(self, left_ts, right_ts, left_names, right_names, left_vis={}, right_vis={}):
left_count = int(left_ts.size().getInfo())
right_count = int(right_ts.size().getInfo())
if left_count != len(left_names):
print(
'The number of images in left_ts must match the number of layer names in left_names.')
return
if right_count != len(right_names):
print(
'The number of images in right_ts must match the number of layer names in right_names.')
return
left_layer = TileLayer(
url='https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',
attribution='Google',
name='Google Maps'
)
right_layer = TileLayer(
url='https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}',
attribution='Google',
name='Google Maps'
)
self.clear_controls()
left_dropdown = widgets.Dropdown(options=left_names, value=None)
right_dropdown = widgets.Dropdown(options=right_names, value=None)
left_dropdown.layout.max_width = '130px'
right_dropdown.layout.max_width = '130px'
left_control = WidgetControl(widget=left_dropdown, position='topleft')
right_control = WidgetControl(
widget=right_dropdown, position='topright')
self.add_control(control=left_control)
self.add_control(control=right_control)
self.add_control(ZoomControl(position='topleft'))
self.add_control(ScaleControl(position='bottomleft'))
self.add_control(FullScreenControl())
def left_dropdown_change(change):
left_dropdown_index = left_dropdown.index
if left_dropdown_index is not None and left_dropdown_index >= 0:
try:
if isinstance(left_ts, ee.ImageCollection):
left_image = left_ts.toList(
left_ts.size()).get(left_dropdown_index)
elif isinstance(left_ts, ee.List):
left_image = left_ts.get(left_dropdown_index)
else:
                        print('The left_ts argument must be an ee.ImageCollection or ee.List.')
return
if isinstance(left_image, ee.ImageCollection):
left_image = ee.Image(left_image.mosaic())
elif isinstance(left_image, ee.Image):
pass
else:
left_image = ee.Image(left_image)
left_image = ee_tile_layer(
left_image, left_vis, left_names[left_dropdown_index])
left_layer.url = left_image.url
except Exception as e:
print(e)
return
left_dropdown.observe(left_dropdown_change, names='value')
def right_dropdown_change(change):
right_dropdown_index = right_dropdown.index
if right_dropdown_index is not None and right_dropdown_index >= 0:
try:
if isinstance(right_ts, ee.ImageCollection):
right_image = right_ts.toList(
                            right_ts.size()).get(right_dropdown_index)
elif isinstance(right_ts, ee.List):
right_image = right_ts.get(right_dropdown_index)
else:
                        print('The right_ts argument must be an ee.ImageCollection or ee.List.')
return
if isinstance(right_image, ee.ImageCollection):
right_image = ee.Image(right_image.mosaic())
elif isinstance(right_image, ee.Image):
pass
else:
right_image = ee.Image(right_image)
right_image = ee_tile_layer(
right_image, right_vis, right_names[right_dropdown_index])
right_layer.url = right_image.url
except Exception as e:
print(e)
return
right_dropdown.observe(right_dropdown_change, names='value')
try:
split_control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer)
self.add_control(split_control)
except Exception as e:
print(e)
def basemap_demo(self):
dropdown = widgets.Dropdown(
options=list(ee_basemaps.keys()),
value='HYBRID',
description='Basemaps'
)
def on_click(change):
basemap_name = change['new']
old_basemap = self.layers[-1]
self.substitute_layer(old_basemap, ee_basemaps[basemap_name])
dropdown.observe(on_click, 'value')
basemap_control = WidgetControl(widget=dropdown, position='topright')
self.remove_control(self.inspector_control)
self.add_control(basemap_control)
def add_legend(self, legend_title='Legend', legend_dict=None, legend_keys=None, legend_colors=None, position='bottomright', builtin_legend=None, **kwargs):
import pkg_resources
from IPython.display import display
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
legend_template = os.path.join(pkg_dir, 'data/template/legend.html')
if 'min_width' not in kwargs.keys():
min_width = None
else:
            min_width = kwargs['min_width']
if 'max_width' not in kwargs.keys():
max_width = None
else:
max_width = kwargs['max_width']
if 'min_height' not in kwargs.keys():
min_height = None
else:
min_height = kwargs['min_height']
if 'max_height' not in kwargs.keys():
max_height = None
else:
max_height = kwargs['max_height']
if 'height' not in kwargs.keys():
height = None
else:
height = kwargs['height']
if 'width' not in kwargs.keys():
width = None
else:
width = kwargs['width']
if width is None:
max_width = '300px'
if height is None:
max_height = '400px'
if not os.path.exists(legend_template):
print('The legend template does not exist.')
return
if legend_keys is not None:
if not isinstance(legend_keys, list):
print('The legend keys must be a list.')
return
else:
            legend_keys = ['One', 'Two', 'Three', 'Four', 'etc']
if legend_colors is not None:
if not isinstance(legend_colors, list):
print('The legend colors must be a list.')
return
elif all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
elif all((item.startswith('#') and len(item) == 7) for item in legend_colors):
pass
elif all((len(item) == 6) for item in legend_colors):
pass
else:
                print('The legend colors must be a list of RGB tuples or hex color strings.')
return
else:
legend_colors = ['#8DD3C7', '#FFFFB3',
'#BEBADA', '#FB8072', '#80B1D3']
if len(legend_keys) != len(legend_colors):
            print('The legend keys and colors must be the same length.')
return
allowed_builtin_legends = builtin_legends.keys()
if builtin_legend is not None:
if builtin_legend not in allowed_builtin_legends:
print('The builtin legend must be one of the following: {}'.format(
', '.join(allowed_builtin_legends)))
return
else:
legend_dict = builtin_legends[builtin_legend]
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if legend_dict is not None:
if not isinstance(legend_dict, dict):
print('The legend dict must be a dictionary.')
return
else:
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
allowed_positions = ['topleft', 'topright',
'bottomleft', 'bottomright']
if position not in allowed_positions:
print('The position must be one of the following: {}'.format(
', '.join(allowed_positions)))
return
header = []
content = []
footer = []
with open(legend_template) as f:
lines = f.readlines()
lines[3] = lines[3].replace('Legend', legend_title)
header = lines[:6]
footer = lines[11:]
for index, key in enumerate(legend_keys):
color = legend_colors[index]
if not color.startswith('#'):
color = '#' + color
item = " <li><span style='background:{};'></span>{}</li>\n".format(
color, key)
content.append(item)
legend_html = header + content + footer
legend_text = ''.join(legend_html)
try:
if self.legend_control is not None:
legend_widget = self.legend_widget
legend_widget.close()
self.remove_control(self.legend_control)
legend_output_widget = widgets.Output(
layout={'border': '1px solid black', 'max_width': max_width, 'min_width': min_width, 'max_height': max_height,
'min_height': min_height, 'height': height, 'width': width, 'overflow': 'scroll'})
legend_control = WidgetControl(
widget=legend_output_widget, position=position)
legend_widget = widgets.HTML(value=legend_text)
with legend_output_widget:
display(legend_widget)
self.legend_widget = legend_output_widget
self.legend_control = legend_control
self.add_control(legend_control)
except Exception as e:
print(e)
def image_overlay(self, url, bounds, name):
from base64 import b64encode
from PIL import Image, ImageSequence
from io import BytesIO
try:
if not url.startswith('http'):
if not os.path.exists(url):
print('The provided file does not exist.')
return
ext = os.path.splitext(url)[1][1:]
image = Image.open(url)
f = BytesIO()
if ext.lower() == 'gif':
frames = []
for frame in ImageSequence.Iterator(image):
frame = frame.convert('RGBA')
b = BytesIO()
frame.save(b, format="gif")
frame = Image.open(b)
frames.append(frame)
frames[0].save(f, format='GIF', save_all=True,
append_images=frames[1:], loop=0)
else:
image.save(f, ext)
data = b64encode(f.getvalue())
data = data.decode('ascii')
url = 'data:image/{};base64,'.format(ext) + data
img = ipyleaflet.ImageOverlay(url=url, bounds=bounds, name=name)
self.add_layer(img)
except Exception as e:
print(e)
def video_overlay(self, url, bounds, name):
try:
video = ipyleaflet.VideoOverlay(url=url, bounds=bounds, name=name)
self.add_layer(video)
except Exception as e:
print(e)
def add_landsat_ts_gif(self, layer_name='Timelapse', roi=None, label=None, start_year=1984, end_year=2019, start_date='06-10', end_date='09-20', bands=['NIR', 'Red', 'Green'], vis_params=None, dimensions=768, frames_per_second=10, font_size=30, font_color='white', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, out_gif=None, download=False, apply_fmask=True, nd_bands=None, nd_threshold=0, nd_palette=['black', 'blue']):
try:
if roi is None:
if self.draw_last_feature is not None:
feature = self.draw_last_feature
roi = feature.geometry()
else:
roi = ee.Geometry.Polygon(
[[[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718]]], None, False)
elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):
roi = roi.geometry()
elif isinstance(roi, ee.Geometry):
pass
else:
print('The provided roi is invalid. It must be an ee.Geometry')
return
geojson = ee_to_geojson(roi)
bounds = minimum_bounding_box(geojson)
geojson = adjust_longitude(geojson)
roi = ee.Geometry(geojson)
in_gif = landsat_ts_gif(roi=roi, out_gif=out_gif, start_year=start_year, end_year=end_year, start_date=start_date,
end_date=end_date, bands=bands, vis_params=vis_params, dimensions=dimensions, frames_per_second=frames_per_second, apply_fmask=apply_fmask, nd_bands=nd_bands, nd_threshold=nd_threshold, nd_palette=nd_palette)
in_nd_gif = in_gif.replace('.gif', '_nd.gif')
print('Adding animated text to GIF ...')
add_text_to_gif(in_gif, in_gif, xy=('2%', '2%'), text_sequence=start_year,
font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if nd_bands is not None:
add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '2%'), text_sequence=start_year,
font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if label is not None:
add_text_to_gif(in_gif, in_gif, xy=('2%', '90%'), text_sequence=label,
font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if is_tool('ffmpeg'):
reduce_gif_size(in_gif)
if nd_bands is not None:
reduce_gif_size(in_nd_gif)
print('Adding GIF to the map ...')
self.image_overlay(url=in_gif, bounds=bounds, name=layer_name)
if nd_bands is not None:
self.image_overlay(
url=in_nd_gif, bounds=bounds, name=layer_name+' ND')
print('The timelapse has been added to the map.')
if download:
link = create_download_link(
in_gif, title="Click here to download the timelapse: ")
display(link)
except Exception as e:
print(e)
def to_html(self, outfile, title='My Map', width='100%', height='880px'):
try:
if not outfile.endswith('.html'):
print('The output file must end with .html')
return
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
before_width = self.layout.width
before_height = self.layout.height
if not isinstance(width, str):
print("width must be a string.")
return
elif width.endswith('px') or width.endswith('%'):
pass
else:
print('width must end with px or %')
return
if not isinstance(height, str):
print("height must be a string.")
return
elif not height.endswith('px'):
print('height must end with px')
return
self.layout.width = width
self.layout.height = height
self.save(outfile, title=title)
self.layout.width = before_width
self.layout.height = before_height
except Exception as e:
print(e)
def to_image(self, outfile=None, monitor=1):
if outfile is None:
outfile = os.path.join(os.getcwd(), 'my_map.png')
if outfile.endswith('.png') or outfile.endswith('.jpg'):
pass
else:
print('The output file must be a PNG or JPG image.')
return
work_dir = os.path.dirname(outfile)
if not os.path.exists(work_dir):
os.makedirs(work_dir)
screenshot = screen_capture(outfile, monitor)
self.screenshot = screenshot
def toolbar_reset(self):
toolbar_grid = self.toolbar
for tool in toolbar_grid.children:
tool.value = False
def add_raster(self, image, bands=None, layer_name=None, colormap=None, x_dim='x', y_dim='y'):
try:
import xarray_leaflet
        except ImportError:
            print(
                'You need to install xarray_leaflet first. See https://github.com/davidbrochart/xarray_leaflet')
            print(
                'Try the following to install xarray_leaflet: \n\nconda install -c conda-forge xarray_leaflet')
            return
import warnings
import numpy as np
import rioxarray
import xarray as xr
import matplotlib.pyplot as plt
warnings.simplefilter('ignore')
if not os.path.exists(image):
print('The image file does not exist.')
return
if colormap is None:
colormap = plt.cm.inferno
if layer_name is None:
layer_name = 'Layer_' + random_string()
if isinstance(colormap, str):
colormap = plt.cm.get_cmap(name=colormap)
da = rioxarray.open_rasterio(image, masked=True)
multi_band = False
if len(da.band) > 1:
multi_band = True
if bands is None:
bands = [3, 2, 1]
else:
bands = 1
if multi_band:
da = da.rio.write_nodata(0)
else:
da = da.rio.write_nodata(np.nan)
da = da.sel(band=bands)
if multi_band:
layer = da.leaflet.plot(
self, x_dim=x_dim, y_dim=y_dim, rgb_dim='band')
else:
layer = da.leaflet.plot(
self, x_dim=x_dim, y_dim=y_dim, colormap=colormap)
layer.name = layer_name
def remove_drawn_features(self):
if self.draw_layer is not None:
self.remove_layer(self.draw_layer)
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.chart_values = []
self.chart_points = []
self.chart_labels = None
def extract_values_to_points(self, filename):
import csv
filename = os.path.abspath(filename)
allowed_formats = ['csv', 'shp']
ext = filename[-3:]
if ext not in allowed_formats:
print('The output file must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
out_dir = os.path.dirname(filename)
out_csv = filename[:-3] + 'csv'
out_shp = filename[:-3] + 'shp'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
count = len(self.chart_points)
out_list = []
if count > 0:
header = ['id', 'longitude', 'latitude'] + self.chart_labels
out_list.append(header)
for i in range(0, count):
id = i + 1
line = [id] + self.chart_points[i] + self.chart_values[i]
out_list.append(line)
with open(out_csv, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(out_list)
if ext == 'csv':
print('The csv file has been saved to: {}'.format(out_csv))
else:
csv_to_shp(out_csv, out_shp)
print('The shapefile has been saved to: {}'.format(out_shp))
def screen_capture(outfile, monitor=1):
from mss import mss
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if not isinstance(monitor, int):
print('The monitor number must be an integer.')
return
try:
with mss() as sct:
sct.shot(output=outfile, mon=monitor)
return outfile
except Exception as e:
print(e)
def install_from_github(url):
try:
download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
if not os.path.exists(download_dir):
os.makedirs(download_dir)
repo_name = os.path.basename(url)
zip_url = os.path.join(url, 'archive/master.zip')
filename = repo_name + '-master.zip'
download_from_url(url=zip_url, out_file_name=filename,
out_dir=download_dir, unzip=True)
pkg_dir = os.path.join(download_dir, repo_name + '-master')
pkg_name = os.path.basename(url)
work_dir = os.getcwd()
os.chdir(pkg_dir)
print('Installing {}...'.format(pkg_name))
cmd = 'pip install .'
os.system(cmd)
os.chdir(work_dir)
print('{} has been installed successfully.'.format(pkg_name))
except Exception as e:
print(e)
def rgb_to_hex(rgb=(255, 255, 255)):
return '%02x%02x%02x' % rgb
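# e.g., rgb_to_hex((255, 0, 0)) returns 'ff0000' (no leading '#').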
def hex_to_rgb(value='FFFFFF'):
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))
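# e.g., hex_to_rgb('FF0000') returns (255, 0, 0); a leading '#' is stripped first.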
def check_color(in_color):
out_color = '#000000'
if isinstance(in_color, tuple) and len(in_color) == 3:
if all(isinstance(item, int) for item in in_color):
rescaled_color = [x / 255.0 for x in in_color]
out_color = colour.Color(rgb=tuple(rescaled_color))
return out_color.hex_l
else:
print(
'RGB color must be a tuple with three integer values ranging from 0 to 255.')
return
else:
try:
out_color = colour.Color(in_color)
return out_color.hex_l
except Exception as e:
print('The provided color is invalid. Using the default black color.')
print(e)
return out_color
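# e.g., check_color((255, 0, 0)) -> '#ff0000' and check_color('blue') -> '#0000ff';
# invalid input falls back to black ('#000000').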
def system_fonts(show_full_path=False):
try:
import matplotlib.font_manager
font_list = matplotlib.font_manager.findSystemFonts(
fontpaths=None, fontext='ttf')
font_list.sort()
font_names = [os.path.basename(f) for f in font_list]
font_names.sort()
if show_full_path:
return font_list
else:
return font_names
except Exception as e:
print(e)
def add_text_to_gif(in_gif, out_gif, xy=None, text_sequence=None, font_type="arial.ttf", font_size=20, font_color='#000000', add_progress_bar=True, progress_bar_color='white', progress_bar_height=5, duration=100, loop=0):
import io
import pkg_resources
import warnings
from PIL import Image, ImageDraw, ImageSequence, ImageFont
warnings.simplefilter('ignore')
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')
in_gif = os.path.abspath(in_gif)
out_gif = os.path.abspath(out_gif)
if not os.path.exists(in_gif):
print('The input gif file does not exist.')
return
if not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
if font_type == 'arial.ttf':
font = ImageFont.truetype(default_font, font_size)
else:
try:
font_list = system_fonts(show_full_path=True)
font_names = [os.path.basename(f) for f in font_list]
if (font_type in font_list) or (font_type in font_names):
font = ImageFont.truetype(font_type, font_size)
else:
print(
'The specified font type could not be found on your system. Using the default font instead.')
font = ImageFont.truetype(default_font, font_size)
except Exception as e:
print(e)
font = ImageFont.truetype(default_font, font_size)
color = check_color(font_color)
progress_bar_color = check_color(progress_bar_color)
try:
image = Image.open(in_gif)
except Exception as e:
print('An error occurred while opening the gif.')
print(e)
return
count = image.n_frames
W, H = image.size
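    # Pre-compute one progress-bar rectangle per frame; the width grows linearly
    # with the frame index so the bar tracks playback along the bottom edge.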
progress_bar_widths = [i * 1.0 / count * W for i in range(1, count + 1)]
progress_bar_shapes = [[(0, H - progress_bar_height), (x, H)]
for x in progress_bar_widths]
if xy is None:
xy = (int(0.05 * W), int(0.05 * H))
    elif (not isinstance(xy, tuple)) or (len(xy) != 2):
        print("xy must be a tuple of length 2, e.g., (10, 10) or ('10%', '10%')")
return
elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):
x, y = xy
if (x > 0) and (x < W) and (y > 0) and (y < H):
pass
else:
print(
'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))
return
elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):
x, y = xy
if ('%' in x) and ('%' in y):
try:
x = int(float(x.replace('%', '')) / 100.0 * W)
y = int(float(y.replace('%', '')) / 100.0 * H)
xy = (x, y)
except Exception as e:
print(
"The specified xy is invalid. It must be formatted like this ('10%', '10%')")
return
else:
print("The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')")
return
if text_sequence is None:
text = [str(x) for x in range(1, count + 1)]
elif isinstance(text_sequence, int):
text = [str(x) for x in range(
text_sequence, text_sequence + count + 1)]
elif isinstance(text_sequence, str):
try:
text_sequence = int(text_sequence)
text = [str(x) for x in range(
text_sequence, text_sequence + count + 1)]
except Exception as e:
text = [text_sequence] * count
elif isinstance(text_sequence, list) and len(text_sequence) != count:
print('The length of the text sequence must be equal to the number ({}) of frames in the gif.'.format(count))
return
else:
text = [str(x) for x in text_sequence]
try:
frames = []
for index, frame in enumerate(ImageSequence.Iterator(image)):
frame = frame.convert('RGB')
draw = ImageDraw.Draw(frame)
draw.text(xy, text[index], font=font, fill=color)
if add_progress_bar:
draw.rectangle(
progress_bar_shapes[index], fill=progress_bar_color)
del draw
b = io.BytesIO()
frame.save(b, format="GIF")
frame = Image.open(b)
frames.append(frame)
frames[0].save(out_gif, save_all=True,
append_images=frames[1:], duration=duration, loop=loop, optimize=True)
except Exception as e:
print(e)
def open_image_from_url(url):
from PIL import Image
import requests
from io import BytesIO
from urllib.parse import urlparse
try:
response = requests.get(url)
img = Image.open(BytesIO(response.content))
return img
except Exception as e:
print(e)
def has_transparency(img):
if img.mode == "P":
transparent = img.info.get("transparency", -1)
for _, index in img.getcolors():
if index == transparent:
return True
elif img.mode == "RGBA":
extrema = img.getextrema()
if extrema[3][0] < 255:
return True
return False
def add_image_to_gif(in_gif, out_gif, in_image, xy=None, image_size=(80, 80), circle_mask=False):
import io
import warnings
from PIL import Image, ImageDraw, ImageSequence, ImageFilter
warnings.simplefilter('ignore')
in_gif = os.path.abspath(in_gif)
is_url = False
if in_image.startswith('http'):
is_url = True
if not os.path.exists(in_gif):
print('The input gif file does not exist.')
return
if (not is_url) and (not os.path.exists(in_image)):
print('The provided logo file does not exist.')
return
if not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
try:
image = Image.open(in_gif)
except Exception as e:
print('An error occurred while opening the image.')
print(e)
return
try:
if in_image.startswith('http'):
logo_raw_image = open_image_from_url(in_image)
else:
in_image = os.path.abspath(in_image)
logo_raw_image = Image.open(in_image)
except Exception as e:
print(e)
logo_raw_size = logo_raw_image.size
image_size = min(logo_raw_size[0], image_size[0]), min(
logo_raw_size[1], image_size[1])
logo_image = logo_raw_image.convert('RGBA')
logo_image.thumbnail(image_size, Image.ANTIALIAS)
W, H = image.size
mask_im = None
if circle_mask:
mask_im = Image.new("L", image_size, 0)
draw = ImageDraw.Draw(mask_im)
draw.ellipse((0, 0, image_size[0], image_size[1]), fill=255)
if has_transparency(logo_raw_image):
mask_im = logo_image.copy()
if xy is None:
xy = (int(0.05 * W), int(0.05 * H))
    elif (not isinstance(xy, tuple)) or (len(xy) != 2):
        print("xy must be a tuple of length 2, e.g., (10, 10) or ('10%', '10%')")
return
elif all(isinstance(item, int) for item in xy) and (len(xy) == 2):
x, y = xy
if (x > 0) and (x < W) and (y > 0) and (y < H):
pass
else:
print(
'xy is out of bounds. x must be within [0, {}], and y must be within [0, {}]'.format(W, H))
return
elif all(isinstance(item, str) for item in xy) and (len(xy) == 2):
x, y = xy
if ('%' in x) and ('%' in y):
try:
x = int(float(x.replace('%', '')) / 100.0 * W)
y = int(float(y.replace('%', '')) / 100.0 * H)
xy = (x, y)
except Exception as e:
print(
"The specified xy is invalid. It must be formatted like this ('10%', '10%')")
return
else:
print("The specified xy is invalid. It must be formatted like this: (10, 10) or ('10%', '10%')")
return
try:
frames = []
for index, frame in enumerate(ImageSequence.Iterator(image)):
frame = frame.convert('RGBA')
frame.paste(logo_image, xy, mask_im)
b = io.BytesIO()
frame.save(b, format="GIF")
frame = Image.open(b)
frames.append(frame)
frames[0].save(out_gif, save_all=True, append_images=frames[1:])
except Exception as e:
print(e)
def show_image(img_path, width=None, height=None):
from IPython.display import display
try:
out = widgets.Output()
out.clear_output(wait=True)
display(out)
with out:
file = open(img_path, "rb")
image = file.read()
if (width is None) and (height is None):
display(widgets.Image(value=image))
elif (width is not None) and (height is not None):
display(widgets.Image(value=image, width=width, height=height))
else:
print('You need set both width and height.')
return
except Exception as e:
print(e)
def legend_from_ee(ee_class_table):
try:
ee_class_table = ee_class_table.strip()
lines = ee_class_table.split('\n')[1:]
if lines[0] == 'Value\tColor\tDescription':
lines = lines[1:]
legend_dict = {}
for index, line in enumerate(lines):
items = line.split("\t")
items = [item.strip() for item in items]
color = items[1]
key = items[0] + " " + items[2]
legend_dict[key] = color
return legend_dict
except Exception as e:
print(e)
def ee_tile_layer(ee_object, vis_params={}, name='Layer untitled', shown=True, opacity=1.0):
image = None
if not isinstance(ee_object, ee.Image) and not isinstance(ee_object, ee.ImageCollection) and not isinstance(ee_object, ee.FeatureCollection) and not isinstance(ee_object, ee.Feature) and not isinstance(ee_object, ee.Geometry):
        err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.ImageCollection, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
features = ee.FeatureCollection(ee_object)
width = 2
if 'width' in vis_params:
width = vis_params['width']
color = '000000'
if 'color' in vis_params:
color = vis_params['color']
image_fill = features.style(
**{'fillColor': color}).updateMask(ee.Image.constant(0.5))
image_outline = features.style(
**{'color': color, 'fillColor': '00000000', 'width': width})
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = ipyleaflet.TileLayer(
url=map_id_dict['tile_fetcher'].url_format,
attribution='Google Earth Engine',
name=name,
opacity=opacity,
        visible=shown
)
return tile_layer
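# Usage sketch (hypothetical asset and vis values):
#   layer = ee_tile_layer(ee.Image('USGS/SRTMGL1_003'), {'min': 0, 'max': 4000}, 'DEM')
#   some_ipyleaflet_map.add_layer(layer)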
def geojson_to_ee(geo_json, geodesic=True):
try:
import json
if not isinstance(geo_json, dict) and os.path.isfile(geo_json):
with open(os.path.abspath(geo_json)) as f:
geo_json = json.load(f)
if geo_json['type'] == 'FeatureCollection':
features = ee.FeatureCollection(geo_json['features'])
return features
elif geo_json['type'] == 'Feature':
geom = None
keys = geo_json['properties']['style'].keys()
if 'radius' in keys:
geom = ee.Geometry(geo_json['geometry'])
radius = geo_json['properties']['style']['radius']
geom = geom.buffer(radius)
elif geo_json['geometry']['type'] == 'Point':
coordinates = geo_json['geometry']['coordinates']
longitude = coordinates[0]
latitude = coordinates[1]
geom = ee.Geometry.Point(longitude, latitude)
else:
geom = ee.Geometry(geo_json['geometry'], "", geodesic)
return geom
else:
print("Could not convert the geojson to ee.Geometry()")
except Exception as e:
print("Could not convert the geojson to ee.Geometry()")
print(e)
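# Usage sketch: geojson_to_ee accepts either a GeoJSON dict or a path to a
# .geojson file, e.g., roi = geojson_to_ee('data/roi.geojson') (hypothetical path).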
def ee_to_geojson(ee_object, out_json=None):
from json import dumps
try:
if isinstance(ee_object, ee.geometry.Geometry) or isinstance(ee_object, ee.feature.Feature) or isinstance(ee_object, ee.featurecollection.FeatureCollection):
json_object = ee_object.getInfo()
if out_json is not None:
out_json = os.path.abspath(out_json)
if not os.path.exists(os.path.dirname(out_json)):
os.makedirs(os.path.dirname(out_json))
geojson = open(out_json, "w")
geojson.write(
dumps({"type": "FeatureCollection", "features": json_object}, indent=2) + "\n")
geojson.close()
return json_object
else:
print("Could not convert the Earth Engine object to geojson")
except Exception as e:
print(e)
def open_github(subdir=None):
import webbrowser
url = 'https://github.com/giswqs/geemap'
if subdir == 'source':
url += '/tree/master/geemap/'
elif subdir == 'examples':
url += '/tree/master/examples'
elif subdir == 'tutorials':
url += '/tree/master/tutorials'
webbrowser.open_new_tab(url)
def clone_repo(out_dir='.', unzip=True):
url = 'https://github.com/giswqs/geemap/archive/master.zip'
filename = 'geemap-master.zip'
download_from_url(url, out_file_name=filename,
out_dir=out_dir, unzip=unzip)
def open_youtube():
import webbrowser
url = 'https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPccOFv1dCwvGI6TYnirRTg3'
webbrowser.open_new_tab(url)
def api_docs():
import webbrowser
url = 'https://giswqs.github.io/geemap/geemap'
webbrowser.open_new_tab(url)
def show_youtube(id='h0pz3S6Tvx0'):
from IPython.display import YouTubeVideo, display
try:
out = widgets.Output(
layout={'width': '815px'})
out.clear_output(wait=True)
display(out)
with out:
display(YouTubeVideo(id, width=800, height=450))
except Exception as e:
print(e)
def check_install(package):
import subprocess
try:
__import__(package)
except ImportError:
print('{} is not installed. Installing ...'.format(package))
        try:
            subprocess.check_call(["python", '-m', 'pip', 'install', package])
            print("{} has been installed successfully.".format(package))
        except Exception as e:
            print('Failed to install {}'.format(package))
            print(e)
def update_package():
import shutil
try:
download_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
if not os.path.exists(download_dir):
os.makedirs(download_dir)
clone_repo(out_dir=download_dir)
pkg_dir = os.path.join(download_dir, 'geemap-master')
work_dir = os.getcwd()
os.chdir(pkg_dir)
if shutil.which('pip') is None:
cmd = 'pip3 install .'
else:
cmd = 'pip install .'
os.system(cmd)
os.chdir(work_dir)
print("\nPlease comment out 'geemap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output")
except Exception as e:
print(e)
def csv_to_shp(in_csv, out_shp, longitude='longitude', latitude='latitude'):
import csv
import shapefile as shp
if not os.path.exists(in_csv):
print('The provided CSV file does not exist.')
return
if not in_csv.endswith('.csv'):
print('The input file must end with .csv')
return
out_dir = os.path.dirname(out_shp)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
try:
points = shp.Writer(out_shp, shapeType=shp.POINT)
with open(in_csv) as csvfile:
csvreader = csv.DictReader(csvfile)
header = csvreader.fieldnames
[points.field(field) for field in header]
for row in csvreader:
points.point((float(row[longitude])), (float(row[latitude])))
points.record(*tuple([row[f] for f in header]))
out_prj = out_shp.replace('.shp', '.prj')
with open(out_prj, 'w') as f:
prj_str = 'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.0174532925199433]] '
f.write(prj_str)
except Exception as e:
print(e)
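# Usage sketch (hypothetical file names; the CSV must contain the longitude and
# latitude columns named in the function arguments):
#   csv_to_shp('points.csv', 'points.shp')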
def shp_to_geojson(in_shp, out_json=None):
try:
import json
import shapefile
in_shp = os.path.abspath(in_shp)
if out_json is None:
out_json = os.path.splitext(in_shp)[0] + ".json"
if os.path.exists(out_json):
out_json = out_json.replace('.json', '_bk.json')
elif not os.path.exists(os.path.dirname(out_json)):
os.makedirs(os.path.dirname(out_json))
reader = shapefile.Reader(in_shp)
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
buffer = []
for sr in reader.shapeRecords():
atr = dict(zip(field_names, sr.record))
geom = sr.shape.__geo_interface__
buffer.append(dict(type="Feature", geometry=geom, properties=atr))
from json import dumps
geojson = open(out_json, "w")
geojson.write(dumps({"type": "FeatureCollection",
"features": buffer}, indent=2) + "\n")
geojson.close()
with open(out_json) as f:
json_data = json.load(f)
return json_data
except Exception as e:
print(e)
def shp_to_ee(in_shp):
try:
json_data = shp_to_geojson(in_shp)
ee_object = geojson_to_ee(json_data)
return ee_object
except Exception as e:
print(e)
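# Keeps only the Polygon members of a feature's geometry (e.g., when a
# GeometryCollection mixes points, lines, and polygons) so that vector export
# does not fail on mixed geometry types.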
def filter_polygons(ftr):
geometries = ftr.geometry().geometries()
geometries = geometries.map(lambda geo: ee.Feature(
ee.Geometry(geo)).set('geoType', ee.Geometry(geo).type()))
polygons = ee.FeatureCollection(geometries).filter(
ee.Filter.eq('geoType', 'Polygon')).geometry()
return ee.Feature(polygons).copyProperties(ftr)
def ee_export_vector(ee_object, filename, selectors=None):
import requests
import zipfile
if not isinstance(ee_object, ee.FeatureCollection):
raise ValueError('ee_object must be an ee.FeatureCollection')
allowed_formats = ['csv', 'geojson', 'kml', 'kmz', 'shp']
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
if filetype == 'shp':
filename = filename.replace('.shp', '.zip')
if not (filetype.lower() in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
print('Earth Engine no longer supports downloading featureCollection as shapefile or json. \nPlease use geemap.ee_export_vector_to_drive() to export featureCollection to Google Drive.')
raise ValueError
if selectors is None:
selectors = ee_object.first().propertyNames().getInfo()
if filetype == 'csv':
ee_object = ee_object.select([".*"], None, False)
if filetype == 'geojson':
selectors = ['.geo'] + selectors
elif not isinstance(selectors, list):
raise ValueError(
"selectors must be a list, such as ['attribute1', 'attribute2']")
else:
allowed_attributes = ee_object.first().propertyNames().getInfo()
for attribute in selectors:
if not (attribute in allowed_attributes):
raise ValueError('Attributes must be one chosen from: {} '.format(
', '.join(allowed_attributes)))
try:
print('Generating URL ...')
url = ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading. \n Retrying ...')
try:
new_ee_object = ee_object.map(filter_polygons)
print('Generating URL ...')
url = new_ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
except Exception as e:
print(e)
raise ValueError
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
except Exception as e:
print('An error occurred while downloading.')
raise ValueError(e)
try:
if filetype == 'shp':
z = zipfile.ZipFile(filename)
z.extractall(os.path.dirname(filename))
os.remove(filename)
filename = filename.replace('.zip', '.shp')
print('Data downloaded to {}'.format(filename))
except Exception as e:
raise ValueError(e)
def ee_export_vector_to_drive(ee_object, description, folder, file_format='shp', selectors=None):
if not isinstance(ee_object, ee.FeatureCollection):
print('The ee_object must be an ee.FeatureCollection.')
return
allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp', 'tfrecord']
if not (file_format.lower() in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
task_config = {
'folder': folder,
'fileFormat': file_format,
}
if selectors is not None:
task_config['selectors'] = selectors
elif (selectors is None) and (file_format.lower() == 'csv'):
ee_object = ee_object.select([".*"], None, False)
print('Exporting {}...'.format(description))
task = ee.batch.Export.table.toDrive(ee_object, description, **task_config)
task.start()
def ee_export_geojson(ee_object, filename=None, selectors=None):
import requests
import zipfile
if not isinstance(ee_object, ee.FeatureCollection):
print('The ee_object must be an ee.FeatureCollection.')
return
if filename is None:
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = os.path.join(out_dir, random_string(6) + '.geojson')
allowed_formats = ['geojson']
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
if not (filetype.lower() in allowed_formats):
print('The output file type must be geojson.')
return
if selectors is None:
selectors = ee_object.first().propertyNames().getInfo()
selectors = ['.geo'] + selectors
elif not isinstance(selectors, list):
print("selectors must be a list, such as ['attribute1', 'attribute2']")
return
else:
allowed_attributes = ee_object.first().propertyNames().getInfo()
for attribute in selectors:
if not (attribute in allowed_attributes):
print('Attributes must be one chosen from: {} '.format(
', '.join(allowed_attributes)))
return
try:
url = ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading. \n Retrying ...')
try:
new_ee_object = ee_object.map(filter_polygons)
print('Generating URL ...')
url = new_ee_object.getDownloadURL(
filetype=filetype, selectors=selectors, filename=name)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
except Exception as e:
print(e)
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
except Exception as e:
print('An error occurred while downloading.')
print(e)
return
with open(filename) as f:
geojson = f.read()
return geojson
def ee_to_shp(ee_object, filename, selectors=None):
try:
if filename.lower().endswith('.shp'):
ee_export_vector(ee_object=ee_object,
filename=filename, selectors=selectors)
else:
print('The filename must end with .shp')
except Exception as e:
print(e)
def ee_to_csv(ee_object, filename, selectors=None):
try:
if filename.lower().endswith('.csv'):
ee_export_vector(ee_object=ee_object,
filename=filename, selectors=selectors)
else:
print('The filename must end with .csv')
except Exception as e:
print(e)
def ee_export_image(ee_object, filename, scale=None, crs=None, region=None, file_per_band=False):
import requests
import zipfile
if not isinstance(ee_object, ee.Image):
print('The ee_object must be an ee.Image.')
return
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
filename_zip = filename.replace('.tif', '.zip')
if filetype != 'tif':
print('The filename must end with .tif')
return
try:
print('Generating URL ...')
params = {'name': name, 'filePerBand': file_per_band}
if scale is None:
scale = ee_object.projection().nominalScale().multiply(10)
params['scale'] = scale
if region is None:
region = ee_object.geometry()
params['region'] = region
if crs is not None:
params['crs'] = crs
url = ee_object.getDownloadURL(params)
print('Downloading data from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading.')
return
with open(filename_zip, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
except Exception as e:
print('An error occurred while downloading.')
print(e)
return
try:
z = zipfile.ZipFile(filename_zip)
z.extractall(os.path.dirname(filename))
z.close()
os.remove(filename_zip)
if file_per_band:
print('Data downloaded to {}'.format(os.path.dirname(filename)))
else:
print('Data downloaded to {}'.format(filename))
except Exception as e:
print(e)
def ee_export_image_collection(ee_object, out_dir, scale=None, crs=None, region=None, file_per_band=False):
import requests
import zipfile
if not isinstance(ee_object, ee.ImageCollection):
print('The ee_object must be an ee.ImageCollection.')
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
try:
count = int(ee_object.size().getInfo())
print("Total number of images: {}\n".format(count))
for i in range(0, count):
image = ee.Image(ee_object.toList(count).get(i))
name = image.get('system:index').getInfo() + '.tif'
filename = os.path.join(os.path.abspath(out_dir), name)
print('Exporting {}/{}: {}'.format(i+1, count, name))
ee_export_image(image, filename=filename, scale=scale,
crs=crs, region=region, file_per_band=file_per_band)
print('\n')
except Exception as e:
print(e)
def ee_export_image_to_drive(ee_object, description, folder=None, region=None, scale=None, crs=None, max_pixels=1.0E13, file_format='GeoTIFF'):
if not isinstance(ee_object, ee.Image):
print('The ee_object must be an ee.Image.')
return
try:
params = {}
if folder is not None:
params['driveFolder'] = folder
if region is not None:
params['region'] = region
if scale is None:
scale = ee_object.projection().nominalScale().multiply(10)
params['scale'] = scale
if crs is not None:
params['crs'] = crs
params['maxPixels'] = max_pixels
params['fileFormat'] = file_format
task = ee.batch.Export.image(ee_object, description, params)
task.start()
print('Exporting {} ...'.format(description))
except Exception as e:
print(e)
def ee_export_image_collection_to_drive(ee_object, descriptions=None, folder=None, region=None, scale=None, crs=None, max_pixels=1.0E13, file_format='GeoTIFF'):
if not isinstance(ee_object, ee.ImageCollection):
print('The ee_object must be an ee.ImageCollection.')
return
try:
count = int(ee_object.size().getInfo())
print("Total number of images: {}\n".format(count))
if (descriptions is not None) and (len(descriptions) != count):
print('The number of descriptions is not equal to the number of images.')
return
if descriptions is None:
descriptions = ee_object.aggregate_array('system:index').getInfo()
images = ee_object.toList(count)
for i in range(0, count):
image = ee.Image(images.get(i))
name = descriptions[i]
ee_export_image_to_drive(
image, name, folder, region, scale, crs, max_pixels, file_format)
except Exception as e:
print(e)
def ee_to_numpy(ee_object, bands=None, region=None, properties=None, default_value=None):
import numpy as np
if not isinstance(ee_object, ee.Image):
print('The input must be an ee.Image.')
return
if region is None:
region = ee_object.geometry()
try:
if bands is not None:
ee_object = ee_object.select(bands)
else:
bands = ee_object.bandNames().getInfo()
band_count = len(bands)
band_arrs = ee_object.sampleRectangle(
region=region, properties=properties, defaultValue=default_value)
band_values = []
for band in bands:
band_arr = band_arrs.get(band).getInfo()
band_value = np.array(band_arr)
band_values.append(band_value)
image = np.dstack(band_values)
return image
except Exception as e:
print(e)
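# Usage sketch (hypothetical values; the region must stay within the pixel
# limit of ee.Image.sampleRectangle):
#   arr = ee_to_numpy(ee.Image('USGS/SRTMGL1_003'), bands=['elevation'], region=roi)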
def download_ee_video(collection, video_args, out_gif):
import requests
out_gif = os.path.abspath(out_gif)
if not out_gif.endswith(".gif"):
print('The output file must have an extension of .gif.')
return
if not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
if 'region' in video_args.keys():
roi = video_args['region']
if not isinstance(roi, ee.Geometry):
try:
roi = roi.geometry()
except Exception as e:
print('Could not convert the provided roi to ee.Geometry')
print(e)
return
video_args['region'] = roi
try:
print('Generating URL...')
url = collection.getVideoThumbURL(video_args)
print('Downloading GIF image from {}\nPlease wait ...'.format(url))
r = requests.get(url, stream=True)
if r.status_code != 200:
print('An error occurred while downloading.')
return
else:
with open(out_gif, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
print('The GIF image has been saved to: {}'.format(out_gif))
except Exception as e:
print(e)
def zonal_statistics(in_value_raster, in_zone_vector, out_file_path, statistics_type='MEAN', scale=None, crs=None, tile_scale=1.0, **kwargs):
if not isinstance(in_value_raster, ee.Image):
print('The input raster must be an ee.Image.')
return
if not isinstance(in_zone_vector, ee.FeatureCollection):
print('The input zone data must be an ee.FeatureCollection.')
return
allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
filename = os.path.abspath(out_file_path)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:].lower()
if not (filetype in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
max_buckets = None
min_bucket_width = None
max_raw = None
hist_min = 1.0
hist_max = 100.0
hist_steps = 10
if 'max_buckets' in kwargs.keys():
max_buckets = kwargs['max_buckets']
if 'min_bucket_width' in kwargs.keys():
        min_bucket_width = kwargs['min_bucket_width']
if 'max_raw' in kwargs.keys():
max_raw = kwargs['max_raw']
if statistics_type.upper() == 'FIXED_HIST' and ('hist_min' in kwargs.keys()) and ('hist_max' in kwargs.keys()) and ('hist_steps' in kwargs.keys()):
hist_min = kwargs['hist_min']
hist_max = kwargs['hist_max']
hist_steps = kwargs['hist_steps']
elif statistics_type.upper() == 'FIXED_HIST':
print('To use fixedHistogram, please provide these three parameters: hist_min, hist_max, and hist_steps.')
return
allowed_statistics = {
'MEAN': ee.Reducer.mean(),
'MAXIMUM': ee.Reducer.max(),
'MEDIAN': ee.Reducer.median(),
'MINIMUM': ee.Reducer.min(),
'STD': ee.Reducer.stdDev(),
'MIN_MAX': ee.Reducer.minMax(),
'SUM': ee.Reducer.sum(),
'VARIANCE': ee.Reducer.variance(),
'HIST': ee.Reducer.histogram(maxBuckets=max_buckets, minBucketWidth=min_bucket_width, maxRaw=max_raw),
'FIXED_HIST': ee.Reducer.fixedHistogram(hist_min, hist_max, hist_steps)
}
if not (statistics_type.upper() in allowed_statistics.keys()):
print('The statistics type must be one of the following: {}'.format(
', '.join(list(allowed_statistics.keys()))))
return
if scale is None:
scale = in_value_raster.projection().nominalScale().multiply(10)
try:
print('Computing statistics ...')
result = in_value_raster.reduceRegions(
collection=in_zone_vector, reducer=allowed_statistics[statistics_type], scale=scale, crs=crs, tileScale=tile_scale)
ee_export_vector(result, filename)
except Exception as e:
print(e)
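# Hedged usage sketch: mean elevation per hydrologic unit, exported to CSV.
# The two asset ids are public Earth Engine datasets; the output path is a
# placeholder.
#
#   dem = ee.Image('USGS/SRTMGL1_003')
#   zones = ee.FeatureCollection('USGS/WBD/2017/HUC04')
#   zonal_statistics(dem, zones, 'mean_elev.csv',
#                    statistics_type='MEAN', scale=1000)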
def zonal_statistics_by_group(in_value_raster, in_zone_vector, out_file_path, statistics_type='SUM', decimal_places=0, denominator=1.0, scale=None, crs=None, tile_scale=1.0):
if not isinstance(in_value_raster, ee.Image):
print('The input raster must be an ee.Image.')
return
band_count = in_value_raster.bandNames().size().getInfo()
band_name = ''
if band_count == 1:
band_name = in_value_raster.bandNames().get(0)
else:
print('The input image can only have one band.')
return
band_types = in_value_raster.bandTypes().get(band_name).getInfo()
band_type = band_types.get('precision')
if band_type != 'int':
print('The input image band must be integer type.')
return
if not isinstance(in_zone_vector, ee.FeatureCollection):
print('The input zone data must be an ee.FeatureCollection.')
return
allowed_formats = ['csv', 'json', 'kml', 'kmz', 'shp']
filename = os.path.abspath(out_file_path)
basename = os.path.basename(filename)
name = os.path.splitext(basename)[0]
filetype = os.path.splitext(basename)[1][1:]
if not (filetype.lower() in allowed_formats):
print('The file type must be one of the following: {}'.format(
', '.join(allowed_formats)))
return
out_dir = os.path.dirname(filename)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
allowed_statistics = ['SUM', 'PERCENTAGE']
if not (statistics_type.upper() in allowed_statistics):
print('The statistics type can only be one of {}'.format(
', '.join(allowed_statistics)))
return
if scale is None:
scale = in_value_raster.projection().nominalScale().multiply(10)
try:
print('Computing ... ')
geometry = in_zone_vector.geometry()
hist = in_value_raster.reduceRegion(ee.Reducer.frequencyHistogram(
), geometry=geometry, bestEffort=True, scale=scale)
class_values = ee.Dictionary(hist.get(band_name)).keys().map(
lambda v: ee.Number.parse(v)).sort()
class_names = class_values.map(
lambda c: ee.String('Class_').cat(ee.Number(c).format()))
class_count = class_values.size().getInfo()
dataset = ee.Image.pixelArea().divide(denominator).addBands(in_value_raster)
init_result = dataset.reduceRegions(**{
'collection': in_zone_vector,
'reducer': ee.Reducer.sum().group(**{
'groupField': 1,
'groupName': 'group',
}),
'scale': scale
})
def build_dict(input_list):
decimal_format = '%.{}f'.format(decimal_places)
in_dict = input_list.map(lambda x: ee.Dictionary().set(ee.String('Class_').cat(
ee.Number(ee.Dictionary(x).get('group')).format()), ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format))))
return in_dict
def get_keys(input_list):
return input_list.map(lambda x: ee.String('Class_').cat(ee.Number(ee.Dictionary(x).get('group')).format()))
def get_values(input_list):
decimal_format = '%.{}f'.format(decimal_places)
return input_list.map(lambda x: ee.Number.parse(ee.Number(ee.Dictionary(x).get('sum')).format(decimal_format)))
def set_attribute(f):
groups = ee.List(f.get('groups'))
keys = get_keys(groups)
values = get_values(groups)
total_area = ee.List(values).reduce(ee.Reducer.sum())
def get_class_values(x):
cls_value = ee.Algorithms.If(
keys.contains(x), values.get(keys.indexOf(x)), 0)
cls_value = ee.Algorithms.If(ee.String(statistics_type).compareTo(ee.String(
'SUM')), ee.Number(cls_value).divide(ee.Number(total_area)), cls_value)
return cls_value
full_values = class_names.map(lambda x: get_class_values(x))
attr_dict = ee.Dictionary.fromLists(class_names, full_values)
attr_dict = attr_dict.set('Class_sum', total_area)
return f.set(attr_dict).set('groups', None)
final_result = init_result.map(set_attribute)
ee_export_vector(final_result, filename)
except Exception as e:
print(e)
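# Hedged usage sketch: per-state area of each land-cover class, reported as
# percentages. The integer 'landcover' band satisfies the int-type check
# above; the asset ids were valid at the time of writing and the output path
# is a placeholder.
#
#   nlcd = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
#   states = ee.FeatureCollection('TIGER/2018/States')
#   zonal_statistics_by_group(nlcd, states, 'landcover.csv',
#                             statistics_type='PERCENTAGE', denominator=1e6)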
def create_colorbar(width=150, height=30, palette=['blue', 'green', 'red'], add_ticks=True, add_labels=True, labels=None, vertical=False, out_file=None, font_type='arial.ttf', font_size=12, font_color='black', add_outline=True, outline_color='black'):
import decimal
import io
import pkg_resources
import warnings
from colour import Color
from PIL import Image, ImageDraw, ImageFont
warnings.simplefilter('ignore')
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py"))
if out_file is None:
filename = 'colorbar_' + random_string() + '.png'
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
out_file = os.path.join(out_dir, filename)
elif not out_file.endswith('.png'):
print('The output file must end with .png')
return
else:
out_file = os.path.abspath(out_file)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
im = Image.new('RGBA', (width, height))
ld = im.load()
def float_range(start, stop, step):
while start < stop:
yield float(start)
start += decimal.Decimal(step)
n_colors = len(palette)
decimal_places = 2
rgb_colors = [Color(check_color(c)).rgb for c in palette]
keys = [round(c, decimal_places)
for c in list(float_range(0, 1.0001, 1.0/(n_colors - 1)))]
heatmap = []
for index, item in enumerate(keys):
pair = [item, rgb_colors[index]]
heatmap.append(pair)
def gaussian(x, a, b, c, d=0):
return a * math.exp(-(x - b)**2 / (2 * c**2)) + d
def pixel(x, width=100, map=[], spread=1):
width = float(width)
r = sum([gaussian(x, p[1][0], p[0] * width, width/(spread*len(map)))
for p in map])
g = sum([gaussian(x, p[1][1], p[0] * width, width/(spread*len(map)))
for p in map])
b = sum([gaussian(x, p[1][2], p[0] * width, width/(spread*len(map)))
for p in map])
return min(1.0, r), min(1.0, g), min(1.0, b)
for x in range(im.size[0]):
r, g, b = pixel(x, width=width, map=heatmap)
        r, g, b = [min(255, int(256*v)) for v in (r, g, b)]  # clamp so v == 1.0 can't wrap past 255
for y in range(im.size[1]):
ld[x, y] = r, g, b
if add_outline:
draw = ImageDraw.Draw(im)
draw.rectangle([(0, 0), (width-1, height-1)],
outline=check_color(outline_color))
del draw
if add_ticks:
tick_length = height * 0.1
x = [key * width for key in keys]
y_top = height - tick_length
y_bottom = height
draw = ImageDraw.Draw(im)
for i in x:
shape = [(i, y_top), (i, y_bottom)]
draw.line(shape, fill='black', width=0)
del draw
if vertical:
im = im.transpose(Image.ROTATE_90)
width, height = im.size
if labels is None:
labels = [str(c) for c in keys]
elif len(labels) == 2:
try:
lowerbound = float(labels[0])
upperbound = float(labels[1])
step = (upperbound - lowerbound) / (len(palette) - 1)
labels = [str(lowerbound + c * step)
for c in range(0, len(palette))]
except Exception as e:
print(e)
print('The labels are invalid.')
return
elif len(labels) == len(palette):
labels = [str(c) for c in labels]
else:
print('The labels must have the same length as the palette.')
return
if add_labels:
default_font = os.path.join(pkg_dir, 'data/fonts/arial.ttf')
if font_type == 'arial.ttf':
font = ImageFont.truetype(default_font, font_size)
else:
try:
font_list = system_fonts(show_full_path=True)
font_names = [os.path.basename(f) for f in font_list]
if (font_type in font_list) or (font_type in font_names):
font = ImageFont.truetype(font_type, font_size)
else:
print(
'The specified font type could not be found on your system. Using the default font instead.')
font = ImageFont.truetype(default_font, font_size)
except Exception as e:
print(e)
font = ImageFont.truetype(default_font, font_size)
font_color = check_color(font_color)
draw = ImageDraw.Draw(im)
w, h = draw.textsize(labels[0], font=font)
for label in labels:
w_tmp, h_tmp = draw.textsize(label, font)
if w_tmp > w:
w = w_tmp
if h_tmp > h:
h = h_tmp
W, H = width + w * 2, height + h * 2
background = Image.new('RGBA', (W, H))
draw = ImageDraw.Draw(background)
if vertical:
xy = (0, h)
else:
xy = (w, 0)
background.paste(im, xy, im)
for index, label in enumerate(labels):
w_tmp, h_tmp = draw.textsize(label, font)
if vertical:
spacing = 5
x = width + spacing
y = int(height + h - keys[index] * height - h_tmp / 2 - 1)
draw.text((x, y), label, font=font, fill=font_color)
else:
x = int(keys[index] * width + w - w_tmp / 2)
spacing = int(h * 0.05)
y = height + spacing
draw.text((x, y), label, font=font, fill=font_color)
im = background.copy()
im.save(out_file)
return out_file
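# Hedged usage sketch: a horizontal colorbar PNG whose end labels are
# interpolated from a two-element label list; the output path is a
# placeholder.
#
#   create_colorbar(width=250, height=30, palette=['blue', 'white', 'red'],
#                   labels=[0, 4000], out_file='colorbar.png')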
def naip_timeseries(roi=None, start_year=2009, end_year=2018):
try:
def get_annual_NAIP(year):
try:
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
if roi is not None:
collection = collection.filterBounds(roi)
start_date = ee.Date.fromYMD(year, 1, 1)
end_date = ee.Date.fromYMD(year, 12, 31)
naip = collection.filterDate(start_date, end_date) \
.filter(ee.Filter.listContains("system:band_names", "N"))
naip = ee.Image(ee.ImageCollection(naip).mosaic())
return naip
except Exception as e:
print(e)
years = ee.List.sequence(start_year, end_year)
collection = years.map(get_annual_NAIP)
return collection
except Exception as e:
print(e)
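# Hedged usage sketch: annual NAIP mosaics around a buffered point; the
# coordinates are placeholders. The return value is an ee.List of images.
#
#   roi = ee.Geometry.Point([-99.2182, 46.7824]).buffer(10000)
#   naip = naip_timeseries(roi, start_year=2010, end_year=2018)
#   first = ee.Image(naip.get(0))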
def sentinel2_timeseries(roi=None, start_year=2015, end_year=2019, start_date='01-01', end_date='12-31'):
    full_widget = widgets.HBox()
left_widget = widgets.VBox()
right_widget = widgets.VBox()
import_btn = widgets.Button(
description='import', button_style='primary', tooltip='import the content to a new cell', disabled=True)
import_btn.layout.width = '70px'
path_widget = widgets.Text()
path_widget.layout.min_width = '400px'
save_widget = widgets.Button(
description='Save', button_style='primary', tooltip='Save edits to file.', disabled=True)
info_widget = widgets.HBox()
info_widget.children = [path_widget, save_widget]
if use_import:
info_widget.children = [import_btn, path_widget, save_widget]
text_widget = widgets.Textarea()
text_widget.layout.width = '630px'
text_widget.layout.height = '600px'
right_widget.children = [info_widget, text_widget]
full_widget.children = [left_widget]
if search_description is None:
search_description = 'Search files/folders...'
search_box = widgets.Text(placeholder=search_description)
search_box.layout.width = '310px'
tree_widget = widgets.Output()
tree_widget.layout.max_width = '310px'
tree_widget.overflow = 'auto'
left_widget.children = [search_box, tree_widget]
tree = Tree(multiple_selection=False)
tree_dict = {}
def on_button_clicked(b):
content = text_widget.value
out_file = path_widget.value
out_dir = os.path.dirname(out_file)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(out_file, 'w') as f:
f.write(content)
text_widget.disabled = True
text_widget.value = 'The content has been saved successfully.'
save_widget.disabled = True
path_widget.disabled = True
if (out_file not in tree_dict.keys()) and (out_dir in tree_dict.keys()):
node = Node(os.path.basename(out_file))
tree_dict[out_file] = node
parent_node = tree_dict[out_dir]
parent_node.add_node(node)
save_widget.on_click(on_button_clicked)
def import_btn_clicked(b):
if (text_widget.value != '') and (path_widget.value.endswith('.py')):
create_code_cell(text_widget.value)
import_btn.on_click(import_btn_clicked)
def search_box_callback(text):
with tree_widget:
if text.value == '':
print('Loading...')
tree_widget.clear_output(wait=True)
display(tree)
else:
tree_widget.clear_output()
print('Searching...')
tree_widget.clear_output(wait=True)
sub_tree = search_api_tree(text.value, tree_dict)
display(sub_tree)
search_box.on_submit(search_box_callback)
def handle_file_click(event):
if event['new']:
cur_node = event['owner']
for key in tree_dict.keys():
if (cur_node is tree_dict[key]) and (os.path.isfile(key)):
if key.endswith('.py'):
import_btn.disabled = False
else:
import_btn.disabled = True
try:
with open(key) as f:
content = f.read()
text_widget.value = content
text_widget.disabled = False
path_widget.value = key
path_widget.disabled = False
save_widget.disabled = False
full_widget.children = [left_widget, right_widget]
except Exception as e:
path_widget.value = key
path_widget.disabled = True
save_widget.disabled = True
text_widget.disabled = True
text_widget.value = 'Failed to open {}.'.format(
cur_node.name) + '\n\n' + str(e)
full_widget.children = [left_widget, right_widget]
return
break
def handle_folder_click(event):
if event['new']:
full_widget.children = [left_widget]
text_widget.value = ''
if add_root_node:
root_name = in_dir.split(sep)[-1]
root_node = Node(root_name)
tree_dict[in_dir] = root_node
tree.add_node(root_node)
root_node.observe(handle_folder_click, 'selected')
for root, d_names, f_names in os.walk(in_dir):
if not show_hidden:
folders = root.split(sep)
for folder in folders:
if folder.startswith('.'):
continue
            # Iterate over copies so in-place removals don't skip entries.
            for d_name in list(d_names):
                if d_name.startswith('.'):
                    d_names.remove(d_name)
            for f_name in list(f_names):
                if f_name.startswith('.'):
                    f_names.remove(f_name)
d_names.sort()
f_names.sort()
if (not add_root_node) and (root == in_dir):
for d_name in d_names:
node = Node(d_name)
tree_dict[os.path.join(in_dir, d_name)] = node
tree.add_node(node)
node.opened = False
node.observe(handle_folder_click, 'selected')
if (root != in_dir) and (root not in tree_dict.keys()):
name = root.split(sep)[-1]
dir_name = os.path.dirname(root)
parent_node = tree_dict[dir_name]
node = Node(name)
tree_dict[root] = node
parent_node.add_node(node)
node.observe(handle_folder_click, 'selected')
if len(f_names) > 0:
parent_node = tree_dict[root]
parent_node.opened = False
for f_name in f_names:
node = Node(f_name)
node.icon = 'file'
full_path = os.path.join(root, f_name)
tree_dict[full_path] = node
parent_node.add_node(node)
node.observe(handle_file_click, 'selected')
with tree_widget:
tree_widget.clear_output()
display(tree)
if return_sep_widgets:
return left_widget, right_widget, tree_dict
else:
return full_widget
def check_git_install():
import webbrowser
cmd = 'git --version'
output = os.popen(cmd).read()
if 'git version' in output:
return True
else:
url = 'https://git-scm.com/downloads'
print(
"Git is not installed. Please download Git from {} and install it.".format(url))
webbrowser.open_new_tab(url)
return False
def clone_github_repo(url, out_dir):
import zipfile
repo_name = os.path.basename(url)
url_zip = url + '/archive/master.zip'
if os.path.exists(out_dir):
print(
'The specified output directory already exists. Please choose a new directory.')
return
parent_dir = os.path.dirname(out_dir)
out_file_path = os.path.join(parent_dir, repo_name + '.zip')
try:
urllib.request.urlretrieve(url_zip, out_file_path)
    except Exception:
print("The provided URL is invalid. Please double check the URL.")
return
with zipfile.ZipFile(out_file_path, "r") as zip_ref:
zip_ref.extractall(parent_dir)
src = out_file_path.replace('.zip', '-master')
os.rename(src, out_dir)
os.remove(out_file_path)
def clone_github_repo2(url, out_dir=None):
check_install('dulwich')
from dulwich import porcelain
repo_name = os.path.basename(url)
if out_dir is None:
out_dir = os.path.join(os.getcwd(), repo_name)
if not os.path.exists(os.path.dirname(out_dir)):
os.makedirs(os.path.dirname(out_dir))
if os.path.exists(out_dir):
print(
'The specified output directory already exists. Please choose a new directory.')
return
try:
porcelain.clone(url, out_dir)
except Exception as e:
print('Failed to clone the repository.')
print(e)
def clone_google_repo(url, out_dir=None):
repo_name = os.path.basename(url)
if out_dir is None:
out_dir = os.path.join(os.getcwd(), repo_name)
if not os.path.exists(os.path.dirname(out_dir)):
os.makedirs(os.path.dirname(out_dir))
if os.path.exists(out_dir):
print(
'The specified output directory already exists. Please choose a new directory.')
return
if check_git_install():
cmd = 'git clone "{}" "{}"'.format(url, out_dir)
os.popen(cmd).read()
def reduce_gif_size(in_gif, out_gif=None):
import ffmpeg
import shutil
if not is_tool('ffmpeg'):
print('ffmpeg is not installed on your computer.')
return
if not os.path.exists(in_gif):
print('The input gif file does not exist.')
return
if out_gif is None:
out_gif = in_gif
elif not os.path.exists(os.path.dirname(out_gif)):
os.makedirs(os.path.dirname(out_gif))
if in_gif == out_gif:
tmp_gif = in_gif.replace('.gif', '_tmp.gif')
shutil.copyfile(in_gif, tmp_gif)
stream = ffmpeg.input(tmp_gif)
stream = ffmpeg.output(stream, in_gif).overwrite_output()
ffmpeg.run(stream)
os.remove(tmp_gif)
else:
stream = ffmpeg.input(in_gif)
stream = ffmpeg.output(stream, out_gif).overwrite_output()
ffmpeg.run(stream)
def upload_to_imgur(in_gif):
import subprocess
pkg_name = 'imgur-uploader'
if not is_tool(pkg_name):
check_install(pkg_name)
try:
IMGUR_API_ID = os.environ.get('IMGUR_API_ID', None)
IMGUR_API_SECRET = os.environ.get('IMGUR_API_SECRET', None)
credentials_path = os.path.join(os.path.expanduser(
'~'), '.config/imgur_uploader/uploader.cfg')
if ((IMGUR_API_ID is not None) and (IMGUR_API_SECRET is not None)) or os.path.exists(credentials_path):
proc = subprocess.Popen(
['imgur-uploader', in_gif], stdout=subprocess.PIPE)
for i in range(0, 2):
line = proc.stdout.readline()
print(line.rstrip().decode("utf-8"))
else:
print('Imgur API credentials could not be found. Please check https://pypi.org/project/imgur-uploader/ for instructions on how to get Imgur API credentials')
return
except Exception as e:
print(e)
def is_tool(name):
from shutil import which
return which(name) is not None
def image_props(img, date_format='YYYY-MM-dd'):
if not isinstance(img, ee.Image):
print('The input object must be an ee.Image')
return
keys = img.propertyNames().remove('system:footprint').remove('system:bands')
values = keys.map(lambda p: img.get(p))
bands = img.bandNames()
scales = bands.map(lambda b: img.select([b]).projection().nominalScale())
scale = ee.Algorithms.If(scales.distinct().size().gt(
1), ee.Dictionary.fromLists(bands.getInfo(), scales), scales.get(0))
image_date = ee.Date(img.get('system:time_start')).format(date_format)
time_start = ee.Date(img.get('system:time_start')
).format('YYYY-MM-dd HH:mm:ss')
time_end = ee.Algorithms.If(ee.List(img.propertyNames()).contains('system:time_end'), ee.Date(
img.get('system:time_end')).format('YYYY-MM-dd HH:mm:ss'), time_start)
asset_size = ee.Number(img.get('system:asset_size')).divide(
1e6).format().cat(ee.String(' MB'))
props = ee.Dictionary.fromLists(keys, values)
props = props.set('system:time_start', time_start)
props = props.set('system:time_end', time_end)
props = props.set('system:asset_size', asset_size)
props = props.set('NOMINAL_SCALE', scale)
props = props.set('IMAGE_DATE', image_date)
return props
def image_stats(img, region=None, scale=None):
import geemap.utils as utils
if not isinstance(img, ee.Image):
print('The input object must be an ee.Image')
return
stat_types = ['min', 'max', 'mean', 'std', 'sum']
image_min = utils.image_min_value(img, region, scale)
image_max = utils.image_max_value(img, region, scale)
image_mean = utils.image_mean_value(img, region, scale)
image_std = utils.image_std_value(img, region, scale)
image_sum = utils.image_sum_value(img, region, scale)
stat_results = ee.List(
[image_min, image_max, image_mean, image_std, image_sum])
stats = ee.Dictionary.fromLists(stat_types, stat_results)
return stats
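# Hedged usage sketch: the five reducers above, applied to a public DEM at a
# coarse scale.
#
#   stats = image_stats(ee.Image('USGS/SRTMGL1_003'), scale=1000)
#   stats.getInfo()  # {'min': ..., 'max': ..., 'mean': ..., 'std': ..., 'sum': ...}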
def date_sequence(start, end, unit, date_format='YYYY-MM-dd'):
start_date = ee.Date(start)
end_date = ee.Date(end)
count = ee.Number(end_date.difference(start_date, unit)).toInt()
num_seq = ee.List.sequence(0, count)
date_seq = num_seq.map(
lambda d: start_date.advance(d, unit).format(date_format))
return date_seq
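# Hedged usage sketch: thirteen month boundaries spanning one year
# (the sequence is inclusive of both endpoints).
#
#   months = date_sequence('2020-01-01', '2021-01-01', 'month')
#   months.getInfo()  # ['2020-01-01', '2020-02-01', ..., '2021-01-01']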
def adjust_longitude(in_fc):
try:
keys = in_fc.keys()
if 'geometry' in keys:
coordinates = in_fc['geometry']['coordinates']
if in_fc['geometry']['type'] == 'Point':
longitude = coordinates[0]
if longitude < - 180:
longitude = 360 + longitude
elif longitude > 180:
longitude = longitude - 360
in_fc['geometry']['coordinates'][0] = longitude
elif in_fc['geometry']['type'] == 'Polygon':
for index1, item in enumerate(coordinates):
for index2, element in enumerate(item):
longitude = element[0]
if longitude < - 180:
longitude = 360 + longitude
elif longitude > 180:
longitude = longitude - 360
in_fc['geometry']['coordinates'][index1][index2][0] = longitude
elif in_fc['geometry']['type'] == 'LineString':
for index, element in enumerate(coordinates):
longitude = element[0]
if longitude < - 180:
longitude = 360 + longitude
elif longitude > 180:
longitude = longitude - 360
in_fc['geometry']['coordinates'][index][0] = longitude
elif 'type' in keys:
coordinates = in_fc['coordinates']
if in_fc['type'] == 'Point':
longitude = coordinates[0]
if longitude < - 180:
longitude = 360 + longitude
elif longitude > 180:
longitude = longitude - 360
in_fc['coordinates'][0] = longitude
elif in_fc['type'] == 'Polygon':
for index1, item in enumerate(coordinates):
for index2, element in enumerate(item):
longitude = element[0]
if longitude < - 180:
longitude = 360 + longitude
elif longitude > 180:
longitude = longitude - 360
in_fc['coordinates'][index1][index2][0] = longitude
elif in_fc['type'] == 'LineString':
for index, element in enumerate(coordinates):
longitude = element[0]
if longitude < - 180:
longitude = 360 + longitude
elif longitude > 180:
longitude = longitude - 360
in_fc['coordinates'][index][0] = longitude
return in_fc
except Exception as e:
print(e)
return None
def set_proxy(port=1080, ip='http://127.0.0.1'):
import os
import requests
try:
if not ip.startswith('http'):
ip = 'http://' + ip
proxy = '{}:{}'.format(ip, port)
os.environ['HTTP_PROXY'] = proxy
os.environ['HTTPS_PROXY'] = proxy
a = requests.get('https://earthengine.google.com/')
if a.status_code != 200:
print(
'Failed to connect to Earth Engine. Please double check the port number and ip address.')
except Exception as e:
print(e)
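# Hedged usage sketch: route HTTP(S) traffic through a local proxy before
# initializing Earth Engine (the port below is the function default).
#
#   set_proxy(port=1080)
#   ee.Initialize()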
def in_colab_shell():
try:
import google.colab
return True
except ImportError:
return False
def is_drive_mounted():
drive_path = '/content/drive/My Drive'
if os.path.exists(drive_path):
return True
else:
return False
def credentials_in_drive():
credentials_path = '/content/drive/My Drive/.config/earthengine/credentials'
if os.path.exists(credentials_path):
return True
else:
return False
def credentials_in_colab():
credentials_path = '/root/.config/earthengine/credentials'
if os.path.exists(credentials_path):
return True
else:
return False
def copy_credentials_to_drive():
import shutil
src = '/root/.config/earthengine/credentials'
dst = '/content/drive/My Drive/.config/earthengine/credentials'
wd = os.path.dirname(dst)
if not os.path.exists(wd):
os.makedirs(wd)
shutil.copyfile(src, dst)
def copy_credentials_to_colab():
import shutil
src = '/content/drive/My Drive/.config/earthengine/credentials'
dst = '/root/.config/earthengine/credentials'
wd = os.path.dirname(dst)
if not os.path.exists(wd):
os.makedirs(wd)
shutil.copyfile(src, dst)
def create_download_link(filename, title="Click here to download: "):
import base64
from IPython.display import HTML
data = open(filename, "rb").read()
b64 = base64.b64encode(data)
payload = b64.decode()
basename = os.path.basename(filename)
html = '<a download="{filename}" href="data:text/csv;base64,{payload}" style="color:#0000FF;" target="_blank">{title}</a>'
html = html.format(payload=payload, title=title +
f' {basename}', filename=basename)
return HTML(html)
def edit_download_html(htmlWidget, filename, title="Click here to download: "):
from IPython.display import HTML
import ipywidgets as widgets
import base64
htmlWidget.value = "<i class=\"fa fa-spinner fa-spin fa-2x fa-fw\"></i><span class=\"sr-only\">Loading...</span>"
data = open(filename, "rb").read()
b64 = base64.b64encode(data)
payload = b64.decode()
basename = os.path.basename(filename)
html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
htmlWidget.value = html.format(
payload=payload, title=title+basename, filename=basename)
def load_GeoTIFF(URL):
uri = URL.strip()
if uri.startswith('https://storage.googleapis.com/'):
uri = uri.replace('https://storage.googleapis.com/', 'gs://')
elif uri.startswith('https://storage.cloud.google.com/'):
uri = uri.replace('https://storage.cloud.google.com/', 'gs://')
if not uri.startswith('gs://'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
if not uri.lower().endswith('.tif'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
cloud_image = ee.Image.loadGeoTIFF(uri)
return cloud_image
def load_GeoTIFFs(URLs):
if not isinstance(URLs, list):
raise Exception('The URLs argument must be a list.')
URIs = []
for URL in URLs:
uri = URL.strip()
if uri.startswith('https://storage.googleapis.com/'):
uri = uri.replace('https://storage.googleapis.com/', 'gs://')
elif uri.startswith('https://storage.cloud.google.com/'):
uri = uri.replace('https://storage.cloud.google.com/', 'gs://')
if not uri.startswith('gs://'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
if not uri.lower().endswith('.tif'):
raise Exception(
'Invalid GCS URL: {}. Expected something of the form "gs://bucket/path/to/object.tif".'.format(uri))
URIs.append(uri)
URIs = ee.List(URIs)
collection = URIs.map(lambda uri: ee.Image.loadGeoTIFF(uri))
return ee.ImageCollection(collection)
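# Hedged usage sketch: both loaders normalize public Cloud Storage HTTPS URLs
# to gs:// URIs. The object below is the cloud-hosted Landsat sample used in
# the Earth Engine loadGeoTIFF documentation.
#
#   url = ('gs://gcp-public-data-landsat/LC08/01/044/034/'
#          'LC08_L1TP_044034_20131228_20170307_01_T1/'
#          'LC08_L1TP_044034_20131228_20170307_01_T1_B5.TIF')
#   img = load_GeoTIFF(url)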
def landsat_ts_norm_diff(collection, bands=['Green', 'SWIR1'], threshold=0):
nd_images = collection.map(lambda img: img.normalizedDifference(
bands).gt(threshold).copyProperties(img, img.propertyNames()))
return nd_images
def landsat_ts_norm_diff_gif(collection, out_gif=None, vis_params=None, palette=['black', 'blue'], dimensions=768, frames_per_second=10):
coordinates = ee.Image(collection.first()).get('coordinates')
roi = ee.Geometry.Polygon(coordinates, None, False)
if out_gif is None:
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = 'landsat_ts_nd_' + random_string() + '.gif'
out_gif = os.path.join(out_dir, filename)
elif not out_gif.endswith('.gif'):
raise Exception('The output file must end with .gif')
bands = ['nd']
if vis_params is None:
vis_params = {}
vis_params['bands'] = bands
vis_params['palette'] = palette
video_args = vis_params.copy()
video_args['dimensions'] = dimensions
video_args['region'] = roi
video_args['framesPerSecond'] = frames_per_second
video_args['crs'] = 'EPSG:3857'
if 'bands' not in video_args.keys():
video_args['bands'] = bands
download_ee_video(collection, video_args, out_gif)
return out_gif
| true | true |
f7fdc99b72357a26806137abf448bcc577cab82d | 3,742 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/image_os_disk_py3.py | ashirey-msft/azure-sdk-for-python | e04778e13306dad2e8fb044970215bad6296afb6 | ["MIT"] | null | null | null | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/image_os_disk_py3.py | ashirey-msft/azure-sdk-for-python | e04778e13306dad2e8fb044970215bad6296afb6 | ["MIT"] | null | null | null | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/image_os_disk_py3.py | ashirey-msft/azure-sdk-for-python | e04778e13306dad2e8fb044970215bad6296afb6 | ["MIT"] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageOSDisk(Model):
"""Describes an Operating System disk.
All required parameters must be populated in order to send to Azure.
:param os_type: Required. This property allows you to specify the type of
the OS that is included in the disk if creating a VM from a custom image.
<br><br> Possible values are: <br><br> **Windows** <br><br> **Linux**.
Possible values include: 'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2018_06_01.models.OperatingSystemTypes
:param os_state: Required. The OS State. Possible values include:
'Generalized', 'Specialized'
:type os_state: str or
~azure.mgmt.compute.v2018_06_01.models.OperatingSystemStateTypes
:param snapshot: The snapshot.
:type snapshot: ~azure.mgmt.compute.v2018_06_01.models.SubResource
:param managed_disk: The managedDisk.
:type managed_disk: ~azure.mgmt.compute.v2018_06_01.models.SubResource
:param blob_uri: The Virtual Hard Disk.
:type blob_uri: str
:param caching: Specifies the caching requirements. <br><br> Possible
values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
<br><br> Default: **None for Standard storage. ReadOnly for Premium
storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
:type caching: str or ~azure.mgmt.compute.v2018_06_01.models.CachingTypes
:param disk_size_gb: Specifies the size of empty data disks in gigabytes.
This element can be used to overwrite the name of the disk in a virtual
machine image. <br><br> This value cannot be larger than 1023 GB
:type disk_size_gb: int
:param storage_account_type: Specifies the storage account type for the
managed disk. Possible values are: Standard_LRS, Premium_LRS, and
StandardSSD_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS',
'StandardSSD_LRS', 'UltraSSD_LRS'
:type storage_account_type: str or
~azure.mgmt.compute.v2018_06_01.models.StorageAccountTypes
"""
_validation = {
'os_type': {'required': True},
'os_state': {'required': True},
}
_attribute_map = {
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'os_state': {'key': 'osState', 'type': 'OperatingSystemStateTypes'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
}
def __init__(self, *, os_type, os_state, snapshot=None, managed_disk=None, blob_uri: str=None, caching=None, disk_size_gb: int=None, storage_account_type=None, **kwargs) -> None:
super(ImageOSDisk, self).__init__(**kwargs)
self.os_type = os_type
self.os_state = os_state
self.snapshot = snapshot
self.managed_disk = managed_disk
self.blob_uri = blob_uri
self.caching = caching
self.disk_size_gb = disk_size_gb
self.storage_account_type = storage_account_type
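# Hedged usage sketch (not part of the generated model file): constructing an
# ImageOSDisk for a generalized Linux image backed by a VHD blob; the URI is
# a placeholder.
#
#   os_disk = ImageOSDisk(os_type='Linux', os_state='Generalized',
#                         blob_uri='https://myaccount.blob.core.windows.net/vhds/osdisk.vhd',
#                         caching='ReadWrite', disk_size_gb=30)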
| 47.367089 | 182 | 0.65954 |
from msrest.serialization import Model
class ImageOSDisk(Model):
_validation = {
'os_type': {'required': True},
'os_state': {'required': True},
}
_attribute_map = {
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'os_state': {'key': 'osState', 'type': 'OperatingSystemStateTypes'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
}
def __init__(self, *, os_type, os_state, snapshot=None, managed_disk=None, blob_uri: str=None, caching=None, disk_size_gb: int=None, storage_account_type=None, **kwargs) -> None:
super(ImageOSDisk, self).__init__(**kwargs)
self.os_type = os_type
self.os_state = os_state
self.snapshot = snapshot
self.managed_disk = managed_disk
self.blob_uri = blob_uri
self.caching = caching
self.disk_size_gb = disk_size_gb
self.storage_account_type = storage_account_type
| true | true |
f7fdcc6b2b516329734a1c19749bfc557a6c4e40 | 391 | py | Python | sizakat/wsgi.py | artmxra7/sizkt-backend | 49263b7d937ac62307b8ced47fa1497226f1d4cc | ["MIT"] | null | null | null | sizakat/wsgi.py | artmxra7/sizkt-backend | 49263b7d937ac62307b8ced47fa1497226f1d4cc | ["MIT"] | 5 | 2021-03-30T14:16:46.000Z | 2021-09-22T19:38:45.000Z | sizakat/wsgi.py | artmxra7/sizkt-backend | 49263b7d937ac62307b8ced47fa1497226f1d4cc | ["MIT"] | 1 | 2020-11-14T02:58:29.000Z | 2020-11-14T02:58:29.000Z |
"""
WSGI config for sizakat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sizakat.settings')
application = get_wsgi_application()
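# Hedged deployment note (not in the original file): a WSGI server such as
# Gunicorn would typically serve this module as, e.g.,
#
#   gunicorn sizakat.wsgi:application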
| 23 | 78 | 0.785166 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sizakat.settings')
application = get_wsgi_application()
| true | true |
f7fdcd5a5a66bda9b8e917b753ad6475909d0e8d | 1,894 | py | Python | utils/swift_build_support/swift_build_support/products/indexstoredb.py | orakaro/swift | e6ae7867bfd017a614b78a320618a1b675d6b82f | ["Apache-2.0"] | 3 | 2019-06-25T10:54:55.000Z | 2020-07-07T10:29:53.000Z | utils/swift_build_support/swift_build_support/products/indexstoredb.py | liangliang12/swift | e7c294a076b41a9c87be99bf28d91f864977012a | ["Apache-2.0"] | null | null | null | utils/swift_build_support/swift_build_support/products/indexstoredb.py | liangliang12/swift | e7c294a076b41a9c87be99bf28d91f864977012a | ["Apache-2.0"] | null | null | null |
# swift_build_support/products/indexstoredb.py -------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import os
import platform
from . import product
from .. import shell
from .. import targets
class IndexStoreDB(product.Product):
@classmethod
def product_source_name(cls):
return "indexstore-db"
@classmethod
def is_build_script_impl_product(cls):
return False
def build(self, host_target):
run_build_script_helper('build', host_target, self, self.args)
def test(self, host_target):
if self.args.test and self.args.test_indexstoredb:
run_build_script_helper('test', host_target, self, self.args)
def run_build_script_helper(action, host_target, product, args):
script_path = os.path.join(
product.source_dir, 'Utilities', 'build-script-helper.py')
toolchain_path = args.install_destdir
if platform.system() == 'Darwin':
# The prefix is an absolute path, so concatenate without os.path.
toolchain_path += \
targets.darwin_toolchain_prefix(args.install_prefix)
configuration = 'debug' if args.build_variant == 'Debug' else 'release'
helper_cmd = [
script_path,
action,
'--verbose',
'--package-path', product.source_dir,
'--build-path', product.build_dir,
'--configuration', configuration,
'--toolchain', toolchain_path,
'--ninja-bin', product.toolchain.ninja,
]
shell.call(helper_cmd)
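# Hedged illustration (not part of the original file): on a Darwin host the
# command assembled above typically resembles the following, with paths
# elided as placeholders:
#
#   .../indexstore-db/Utilities/build-script-helper.py build --verbose \
#       --package-path .../indexstore-db --build-path .../build \
#       --configuration release --toolchain <destdir><toolchain-prefix> \
#       --ninja-bin <path-to-ninja>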
| 32.655172 | 79 | 0.653643 |
import os
import platform
from . import product
from .. import shell
from .. import targets
class IndexStoreDB(product.Product):
@classmethod
def product_source_name(cls):
return "indexstore-db"
@classmethod
def is_build_script_impl_product(cls):
return False
def build(self, host_target):
run_build_script_helper('build', host_target, self, self.args)
def test(self, host_target):
if self.args.test and self.args.test_indexstoredb:
run_build_script_helper('test', host_target, self, self.args)
def run_build_script_helper(action, host_target, product, args):
script_path = os.path.join(
product.source_dir, 'Utilities', 'build-script-helper.py')
toolchain_path = args.install_destdir
if platform.system() == 'Darwin':
toolchain_path += \
targets.darwin_toolchain_prefix(args.install_prefix)
configuration = 'debug' if args.build_variant == 'Debug' else 'release'
helper_cmd = [
script_path,
action,
'--verbose',
'--package-path', product.source_dir,
'--build-path', product.build_dir,
'--configuration', configuration,
'--toolchain', toolchain_path,
'--ninja-bin', product.toolchain.ninja,
]
shell.call(helper_cmd)
| true | true |
f7fdcdb6485fbe32cd6afbac28f4491107befbc0 | 2,519 | py | Python | src/responsive/Responsive.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | ["MIT"] | 5 | 2020-07-14T07:48:10.000Z | 2021-12-20T21:20:10.000Z | src/responsive/Responsive.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | ["MIT"] | 7 | 2021-03-26T03:13:38.000Z | 2022-03-12T00:42:03.000Z | src/responsive/Responsive.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | ["MIT"] | 1 | 2021-02-16T07:04:25.000Z | 2021-02-16T07:04:25.000Z |
import wx
import wx.grid
import typing
from pubsub import pub
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent=None, title="A Simple Grid", size=(900, 400))
self.panel = wx.Panel(self)
pub.subscribe(self.grid_rerender, "grid_rerender") # grid_update.connect(self.grid_rerender)
# 初始化数据
# [
# ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
# ['11', '12', '13', '14', '15', '16', '17', '18', '19', '20']
# ['21', '22', '23', '24', '25', '26', '27', '28', '29', '30']
# ]
items = [[str(row*10 + col + 1).zfill(2) for col in range(10)] for row in range(3)]
rows, cols = len(items), 10
self.grid = wx.grid.Grid(self.panel)
self.grid.CreateGrid(rows, cols)
wx.CallAfter(pub.sendMessage, "grid_rerender", **{"items": items}) # grid_update.send()
        # Lay out the frame
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.grid, 1, wx.EXPAND)
self.panel.SetSizer(sizer)
        # Simulate new data and re-render the grid three seconds later
# [
# ['081', '082', '083', '084', '085', '086', '087', '088', '089', '090']
# ['091', '092', '093', '094', '095', '096', '097', '098', '099', '100']
# ['101', '102', '103', '104', '105', '106', '107', '108', '109', '110']
# ['111', '112', '113', '114', '115', '116', '117', '118', '119', '120']
# ['121', '122', '123', '124', '125', '126', '127', '128', '129', '130']
# ['131', '132', '133', '134', '135', '136', '137', '138', '139', '140']
# ['141', '142', '143', '144', '145', '146', '147', '148', '149', '150']
# ['151', '152', '153', '154', '155', '156', '157', '158', '159', '160']
# ]
items = [[str(row * 10 + col + 1).zfill(3) for col in range(10)] for row in range(8, 16)]
wx.CallLater(3000, pub.sendMessage, "grid_rerender", **{"items": items}) # grid_update.send()
def grid_rerender(self, items: typing.List):
        if not getattr(self, "grid", None): raise RuntimeError("grid is not initialized")
all_rows = self.grid.GetNumberRows()
self.grid.DeleteRows(0, all_rows)
self.grid.AppendRows(len(items))
for row, item in enumerate(items):
for col, value in enumerate(item):
self.grid.SetCellValue(row, col, value)
if __name__ == "__main__":
app = wx.App()
frame = MainFrame()
frame.Show()
app.MainLoop()
| 41.983333 | 117 | 0.500198 |
import wx
import wx.grid
import typing
from pubsub import pub
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent=None, title="A Simple Grid", size=(900, 400))
self.panel = wx.Panel(self)
pub.subscribe(self.grid_rerender, "grid_rerender")
items = [[str(row*10 + col + 1).zfill(2) for col in range(10)] for row in range(3)]
rows, cols = len(items), 10
self.grid = wx.grid.Grid(self.panel)
self.grid.CreateGrid(rows, cols)
wx.CallAfter(pub.sendMessage, "grid_rerender", **{"items": items})
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.grid, 1, wx.EXPAND)
self.panel.SetSizer(sizer)
items = [[str(row * 10 + col + 1).zfill(3) for col in range(10)] for row in range(8, 16)]
wx.CallLater(3000, pub.sendMessage, "grid_rerender", **{"items": items})
def grid_rerender(self, items: typing.List):
        if not getattr(self, "grid", None): raise RuntimeError("grid is not initialized")
all_rows = self.grid.GetNumberRows()
self.grid.DeleteRows(0, all_rows)
self.grid.AppendRows(len(items))
for row, item in enumerate(items):
for col, value in enumerate(item):
self.grid.SetCellValue(row, col, value)
if __name__ == "__main__":
app = wx.App()
frame = MainFrame()
frame.Show()
app.MainLoop()
| true | true |
f7fdcf18f27e413c741d140937778af39416871a | 4,565 | py | Python | CAAPR/CAAPR_AstroMagic/PTS/pts/do/magic/subtract.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 7 | 2016-05-20T21:56:39.000Z | 2022-02-07T21:09:48.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/do/magic/subtract.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 1 | 2019-03-21T16:10:04.000Z | 2019-03-22T17:21:56.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/do/magic/subtract.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | ["MIT"] | 1 | 2020-05-19T16:17:17.000Z | 2020-05-19T16:17:17.000Z |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.magic.subtract Run sky subtraction on an astronomical image.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import os
import argparse
# Import the relevant PTS classes and modules
from pts.magic.misc.imageimporter import ImageImporter
from pts.magic.sky.skysubtractor import SkySubtractor
from pts.magic.basics.mask import Mask
from pts.core.tools import configuration
# -----------------------------------------------------------------
# Create the command-line parser
parser = argparse.ArgumentParser()
# Basic
parser.add_argument("image", type=str, help="the name of the input image")
# Logging
parser.add_argument("--debug", action="store_true", help="enable debug logging mode")
parser.add_argument('--report', action='store_true', help='write a report file')
parser.add_argument("mask", type=str, help="the name of the mask image resulting from the extraction procedure")
parser.add_argument("galaxies", type=str, help="the name of the file specifying the galaxy regions")
parser.add_argument("saturation", type=str, nargs='?', help="the name of the file specifying the saturation regions", default=None)
parser.add_argument('--config', type=str, help='the name of a configuration file')
parser.add_argument("--settings", type=configuration.from_string, help="settings")
parser.add_argument("-i", "--input", type=str, help="the name of the input directory")
parser.add_argument("-o", "--output", type=str, help="the name of the output directory")
parser.add_argument("--bad", type=str, help="the name of the file specifying regions that have to be added to the mask of bad pixels")
# Parse the command line arguments
arguments = parser.parse_args()
# -----------------------------------------------------------------
# -- Input --
# If an input directory is given
if arguments.input is not None:
# Determine the full path to the input directory
arguments.input_path = os.path.abspath(arguments.input)
# Give an error if the input directory does not exist
    if not os.path.isdir(arguments.input_path): raise argparse.ArgumentTypeError("The input directory does not exist: " + arguments.input_path)
# If no input directory is given, assume the input is placed in the current working directory
else: arguments.input_path = os.getcwd()
# -- Output --
# If an output directory is given
if arguments.output is not None:
# Determine the full path to the output directory
arguments.output_path = os.path.abspath(arguments.output)
# Create the directory if it does not yet exist
if not os.path.isdir(arguments.output_path): os.makedirs(arguments.output_path)
# If no output directory is given, place the output in the current working directory
else: arguments.output_path = os.getcwd()
# -----------------------------------------------------------------
# Determine the full path to the image
image_path = os.path.abspath(arguments.image)
# Determine the full path to the bad region file
bad_region_path = os.path.join(arguments.input_path, arguments.bad) if arguments.bad is not None else None
# Import the image
importer = ImageImporter()
importer.run(image_path, bad_region_path=bad_region_path)
# Determine the full path to the mask
mask_path = os.path.abspath(arguments.mask)
# Open the mask frame
mask = Mask.from_file(mask_path)
# -----------------------------------------------------------------
# Determine the full path to the galaxy region file and the saturation region file
galaxy_region_path = os.path.join(arguments.input_path, arguments.galaxies)
saturation_region_path = os.path.join(arguments.input_path, arguments.saturation) if arguments.saturation is not None else None
# Create a SkySubtractor instance and configure it according to the command-line arguments
subtractor = SkySubtractor.from_arguments(arguments)
# Run the subtractor
subtractor.run(importer.image.frames.primary, mask, galaxy_region_path, saturation_region_path, bad_mask=importer.mask)
# Save the result
subtractor.write_result(importer.image.original_header)
# -----------------------------------------------------------------
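# Hedged invocation sketch (not part of the original script); the file names
# and directories below are placeholders:
#
#   python subtract.py image.fits mask.fits galaxies.reg saturation.reg \
#       --input in_dir --output out_dir --report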
| 38.686441 | 136 | 0.674918 |
import os
import argparse
from pts.magic.misc.imageimporter import ImageImporter
from pts.magic.sky.skysubtractor import SkySubtractor
from pts.magic.basics.mask import Mask
from pts.core.tools import configuration
parser = argparse.ArgumentParser()
parser.add_argument("image", type=str, help="the name of the input image")
parser.add_argument("--debug", action="store_true", help="enable debug logging mode")
parser.add_argument('--report', action='store_true', help='write a report file')
parser.add_argument("mask", type=str, help="the name of the mask image resulting from the extraction procedure")
parser.add_argument("galaxies", type=str, help="the name of the file specifying the galaxy regions")
parser.add_argument('--config', type=str, help='the name of a configuration file')
parser.add_argument("--settings", type=configuration.from_string, help="settings")
parser.add_argument("-i", "--input", type=str, help="the name of the input directory")
parser.add_argument("-o", "--output", type=str, help="the name of the output directory")
parser.add_argument("--bad", type=str, help="the name of the file specifying regions that have to be added to the mask of bad pixels")
arguments = parser.parse_args()
if arguments.input is not None:
arguments.input_path = os.path.abspath(arguments.input)
    if not os.path.isdir(arguments.input_path): raise argparse.ArgumentTypeError("The input directory does not exist: " + arguments.input_path)
else: arguments.input_path = os.getcwd()
if arguments.output is not None:
arguments.output_path = os.path.abspath(arguments.output)
if not os.path.isdir(arguments.output_path): os.makedirs(arguments.output_path)
else: arguments.output_path = os.getcwd()
image_path = os.path.abspath(arguments.image)
bad_region_path = os.path.join(arguments.input_path, arguments.bad) if arguments.bad is not None else None
importer = ImageImporter()
importer.run(image_path, bad_region_path=bad_region_path)
mask_path = os.path.abspath(arguments.mask)
mask = Mask.from_file(mask_path)
galaxy_region_path = os.path.join(arguments.input_path, arguments.galaxies)
saturation_region_path = os.path.join(arguments.input_path, arguments.saturation) if arguments.saturation is not None else None
subtractor = SkySubtractor.from_arguments(arguments)
subtractor.run(importer.image.frames.primary, mask, galaxy_region_path, saturation_region_path, bad_mask=importer.mask)
subtractor.write_result(importer.image.original_header)
| true | true |
f7fdcfdf21ac4ecf1f8b843bdc7b5446d47bbba7 | 642 | py | Python | diffie_helman.py | kbrezinski/Cryptography-Encryption | 0d44c06a509fb5b3b85cad10ac0e98264490fd01 | ["BSD-3-Clause"] | null | null | null | diffie_helman.py | kbrezinski/Cryptography-Encryption | 0d44c06a509fb5b3b85cad10ac0e98264490fd01 | ["BSD-3-Clause"] | null | null | null | diffie_helman.py | kbrezinski/Cryptography-Encryption | 0d44c06a509fb5b3b85cad10ac0e98264490fd01 | ["BSD-3-Clause"] | null | null | null |
key_a = 17
key_b = 44_207
g = 3
n = 57_349
message = "IM THE WINNER AND YOU'RE NOT!"
# encrypt function given key, g, n
def encrypt(key: int, g: int, n: int):
ans = pow(g, key, n)
print(f"{key=} {ans=}")
return ans
def encrypt_message(key: int, text: str):
cipher = ''
    for ch in text:
        cipher += chr(ord(ch) ^ key)
print(f"{cipher=}")
return cipher
def brute_force(max_len: int, mess: str):
for i in range(max_len + 1):
encrypt_message(i, mess)
brute_force(key_b, encrypt_message(key_b, message))
#encrypt(key_a, ans_b, n)
#encrypt(key_b, ans_a, n)
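# Hedged sketch (not in the original): the full Diffie-Hellman exchange with
# these toy parameters. Each party publishes g^key mod n, then raises the
# other's public value to its own private key; both arrive at the same
# shared secret. In a real protocol the XOR key above would be derived from
# this shared secret rather than from a private key.
#
#   ans_a = encrypt(key_a, g, n)         # Alice's public value
#   ans_b = encrypt(key_b, g, n)         # Bob's public value
#   shared_a = encrypt(key_a, ans_b, n)  # (g**key_b)**key_a % n
#   shared_b = encrypt(key_b, ans_a, n)  # (g**key_a)**key_b % n
#   assert shared_a == shared_b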
| 16.461538 | 51 | 0.615265 |
key_a = 17
key_b = 44_207
g = 3
n = 57_349
message = "IM THE WINNER AND YOU'RE NOT!"
# encrypt function given key, g, n
def encrypt(key: int, g: int, n: int):
ans = pow(g, key, n)
print(f"{key=} {ans=}")
return ans
def encrypt_message(key: int, text: str):
cipher = ''
    for ch in text:
        cipher += chr(ord(ch) ^ key)
print(f"{cipher=}")
return cipher
def brute_force(max_len: int, mess: str):
for i in range(max_len + 1):
encrypt_message(i, mess)
brute_force(key_b, encrypt_message(key_b, message))
#encrypt(key_a, ans_b, n)
#encrypt(key_b, ans_a, n)
| true | true |
f7fdcff2cbc6d59473e407d5d13b4d592c160dad | 5,701 | py | Python | django/docs/releases/0.95.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | ["bzip2-1.0.6", "MIT"] | null | null | null | django/docs/releases/0.95.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | ["bzip2-1.0.6", "MIT"] | null | null | null | django/docs/releases/0.95.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | ["bzip2-1.0.6", "MIT"] | null | null | null |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXX XXXX XXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XX XXX XXXXXX XXXX XXXXXXXX
XXXX XXXXXXXXXX X XXXXXXXXXXX XXXXXXX XX XXXXXX XXXXXXXXXXX XXXXX XXX XXXX
XXXXXXX XX XXXXXXX XXXXX XXX XXXXXXX XX XXXXX XXXXXX XX XXXX XXXXXXX XXXXX XX
XXX XXXXXXXXX XX XXXX XX XXXXX XXX X XXXXXXX XX XXXXXXXXX XXXXXX
XXXXXXXXXXX XXX XXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXXXX XX XXXXXXXX XX XXXXXXX X XXXXXX XXXXXXXXX XXXXX XXX XXXXXXXXXX
XXXXXXX XX XXXX XX XXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XXX XXXXXXX
XXXXXXXX XXXX XXX XXX XXX XXXXXXXX XXX XXXXX XX XXXXXXXXXXX XXXXXXX XXXXXXX
XXXXXX XXXX XXX X XXXXX XXXX XX XXXXX XXXXX XX XXX XXXXXXXXX XXXX XXXXXX XXXX
XXXXX XXXX XXXX XXXXX XXXXXXX XXXXXX XXXX XXX XXX XXXXXXXXXXXXXXXXXXXXX XXXXX
XXXXX XXXXX XX XXX XXXXX XXXXXXXXX XX XXX XXXXXXXXXXXXX
XXX XXX XXXX X XXXX XX XXX XXXX XX XXX XXXXXXXX XXXX XXX XXXXXX XX
XXXXXXXX XX XXX XXXXXXX XX XXXX XXXXXXXXX XXX XXXXXX XX XXXX XX XX XXXX XX XXXX
XX XXXX XXXX XXX XX XXXX XX XXX XXXXXXXXXX XXXX XXX XXXXXX XX XXX XXXXXXX
XXXXXXXXXXXX XXXX XX XXXXXXXX XXXX XXXX XXXXX XX XXXXXXXX XXXXXX XXXXXXX XXXX
XXXXX XXXXXX XXXXX XX XX XXX XX X XXXXXX XXXXXXX XXXX XXX XXX XXXXXXXX XX XXXXX
XXX XXXXXXX XXXX XXX XXXXX
XXXXXXX XXX XXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXX XXXXXXX XX XXXX XXXXXXX XXXX XXXXXXXXXX XXXXXXXXX XXXXX XXX XXXX
XXXXXXXX XXX X XXXXXX XX XXXXXXX XXX XXXXXXXXXXXXXXX XXXXXX XX XXXXXXXXXXXX
XXXX XXXXXX XXXXXXX X XXXXXX XX XXXXXXXXXXX XX XXX XXX XXXXXX XXXX XXX XX XX
XXXXXXX XXXX XXXX X XXXXXXXXXXX XX XXXXXXXXX XXXX XX XXX XXXXX XXXX XX XXXXXXX
XXXXX XX XXX XXXXXXXXXXX XXXXXXXX XXXX XXX XXXXXXXX XX XXXXX XXXX XXXXXXXX
XXXXXXXX XXXX XXXX XXXXX XX XXXXXXXXX XXX XXXXXXX XXXX XXXXXX XXXXXX XXXXXXXXX
XXXXXX XXX XXXXXXX
XXXXX XXXX XXXXX XXXXXXX XXXX XXXXX XX XXXX XXXXXXX XX X XXXXXXXX XXXXXXXX XX
XXXXXXXXXX XXXXX XXXX XXXXXXXXX XXXXXXXXXXXX XX XXXXX XXXXXXXXX XXXXXXXXXXXXXX
XXXXX XX XXXXXXX XXXXXXXXXXX XXXXXXX XX XXXXX
XXX XXX XXXXXXXX XXX XXXXXXX XXXXXXXXXX XX XXXX XXXXXXXX
X XXXXXX XXX XXXX X XXXX XXXXXXXXXX XXX XXXXXXX XXXXXXXXX XXXXXXXXX XXX
XXXXXXXXXX XXXXXXX XXXX XXX XXXXXXXXX
X XXXXXXXXXXXX XXXXXXX XXXXXXXXX XXX XXXXXXXXX XXX XXXXXX XX XXX XXXXXX
XXXXXXXXX XXXX XXXX XXXXXXX XXX XXXXXXXXXXX XXXXXXXXXX XXX XXXXXXXXX
XXXXXXXXXXX XX XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
X XXXX XXXXXXXX XXXXXXXXXXXXX XXXX XX XXX XXXXXXXXX XXXXX XXX XXXXXXXXX
XXXXX XXXX XXXX XXXXXXXXX XXX XXXXX XXXX XXXXXXXXXXXXXXXXXXX XX XXX XXXXX
XXXX XX XXX XXXXX XXXXXXXXXXXXX XXX XX XXXXXX XXXX XX XXXXXXX XXXXX
XXXXXXXX XXXXXXX
X XXXXXX XXX XXX XXXXXXX XXX XXXXXXXX XXXXXXXX XXXXXXXXXXXXX
X XXXXX XXXXX XXX XXXXXXX XX XXXXX XXXXXX XXXXXXXXXXXXXX XXX XXXXXXXXXXXXX
XXXXXXXX XXX XXXXXXXXXXXXXX XXXXX XXXXXXX XXXXXXXXX XXXXXXXX XXXX XX
XXXXX
X XXXXX XXXX XX XXXXXX XX XXX XXXXXX XXXXXXXXXXX XXXXXXXXX XX XXXXXXX
XXXXXXX X XXX XXXXXXXXX XXXX
X XXXX XXX XXXXXXXX XX XXX XXXXXX XXXXXXX X XXXXXXXXX XXXX XXXXXX XXXXX
XXXX XXX XXXXXXXXX XX XXXXXX XXXXXXXX XXX XX XXXX X XXXXXXX XXXXXXXX XXX
XX XXXX XX XXXXX XXXXXXX XXXXXX XX XXXXX XXXXXX XXX XXX XXXX XXX
XXXXXXXXXXXXXX XX XXXXX XXXX XXXXXXXXXXX XXX XXXXXXXXX XXXXXXXX XXXX X
XXXXXXXX XX XXXXXXXXXXX XXXXXXXXXX XX XXXXXXX XXX XXXXXXXX XXXX XXX
X XXXX XXX XXXX XXXXXXXX XXX XXXXXXX XX XXXXXXXX XXXXXX XXX XXXXXXXX
XXXXXXX XX XXXXXXX XXXXXX XXXX XXXXXXX XX XXXX XXXX XXX XXXXXXXXXX XXX
XXXXXXXXXXX XXXXXX XXXXXX
X XXXXXXXXXX XXXXXX XX XXX XXXXXXXXX XXX XXX XX XXXXXXXXXX XXXXXXX
XXXXXXXXX XXX XXXXXXX XX XX XXXXXXXXXXX XXXXXXXXX XXXX XXXXXXX XXX XXX
XXX XXXXXXXX XXX XXXXXX XXXXXXXXXX XXXXXX XXXXXX XXXXX XXXXXXXXXXXXX
X XXXX XXX XXXX XXXXX XX XXX XXXXXXXXX XXXX XXXX XXXXXXXXXXXXXXXXXX XX
XXXXX XXXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXX XXX XXXXXX
XXXXXXXXX XXXXXXXXX XXXX XXX XXXXXXXXXX XXX XXX XXXX XXXXXXXXXXX XX XXXXX
XX XXXXX XXXX XX XXXXXXXXXX XXXX XXXXXX XX XXXXXXX XX XXXXXXXXX XX XXXXXX
XX XX XXX XXXXXXXX XX XXX XXXXXXXX XXXXX XXXX XX XXXX XXXXXX XXXXXXXXX
XXX XXXXXX XX XXXXXXX XXXXXXXX XX XXXX XXXX XXXXXXXXXXXXXXX XXXX XX XXX XXXX
XXXX XXXX XXX XXXXXXXXXXX XX XXXX XXXXXX XXXXXXXX XXXX XXXX XXX XXX XXXX XXXXX
XXXXXXXXXX XXXXXXX XXX XXXX XXXX XX XX XXXX XXXXX X XXXX XX XXX XXXXXXXXX
XXXXXXX XX XXXXXXXXX XX XXX XXXXXXXXX XXX XXXXXXX XXXX XXXXX XXXXX XX XXXX XX
XXXX XXXXXXXXXX XXX XXXXXXXXX XXXX XXXXXXXXXXX XXX XXXXXXX XXXXXXXXXX
XX XXXXXXXXX XXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXX XXX XXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXX XXXXXXXXX X XXXXXXX XXXX XXXXXXX XXX XXXXXXXXXXXXX XX XXX
XXXXXXXXXXXX XX XXXX XXXXXXXXX XXXXXXXXXXXX XXXXXXXXX XX XXX XXXXXXX XXXXXXXXXX
XXX XXXXXXXXX XXXXXXXXXXXXX XXXXXXXX XX XXXXXXXXXX XXXXXXXXXXXX XX XX XXXXXXXX
X XXXXXX XX XXXXXX XXXX XXXX XX XXXX XXX XXXXXX
XXX XXXX XXXXXXXXXXXX XXXXX XXX XXXXXXXXXXXXXXX XXXXXXX XXXX XX X XXXX XXXXXX
XXXXX XXXX XXXX XXXX XXXXX XXXXXXXXXXX XXX XXX XXXX XXX XXXXX XXX XXXX XX
XXXXXX XXXXXXXX XX XXXXXXXXX XXX XXXXXX XXX XXXXXXXX XXXXXX XXXXXXX XXXXXXX
XXXX XXXXXX XXXXXXXXX XXXXXX XXXX XXXX XXXXXXXXXXX XXX XXX XXXXXXXXXX XXXXXXX
XXX XXXXXXX XXXX XXXX XXXXXXXXX
XXXXXXXX XXX XXXXX XXX XXXXXX XXX XXXX XXXXXXXXX XXXXXXXX XXXXXXX XX XXXX
XXXXXXX X XXXXXXXXXXX XXXXXXX XX XXXXXXXXXXXXXXXX XXXX XX XXXXXXXXX XXXXXXXXX
XX XXXXXX XXXXX XXX XXXXXXXXXX XXXX XXXXXX XXX XXXXXX XXXXXXXX XXXXXX XXX
XXXXXXX XXXXXXXXX XX XXX XXXX XX XXX XXX XX XX XXXXX XX XXXX XX XXXXX
XX XXXXXXX XXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXX XXXXX XXXXXXX
XXX XXXXXX XXXX
XXXX XXXX
| 46.349593 | 79 | 0.83652 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXX XXXX XXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XX XXX XXXXXX XXXX XXXXXXXX
XXXX XXXXXXXXXX X XXXXXXXXXXX XXXXXXX XX XXXXXX XXXXXXXXXXX XXXXX XXX XXXX
XXXXXXX XX XXXXXXX XXXXX XXX XXXXXXX XX XXXXX XXXXXX XX XXXX XXXXXXX XXXXX XX
XXX XXXXXXXXX XX XXXX XX XXXXX XXX X XXXXXXX XX XXXXXXXXX XXXXXX
XXXXXXXXXXX XXX XXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXXXX XX XXXXXXXX XX XXXXXXX X XXXXXX XXXXXXXXX XXXXX XXX XXXXXXXXXX
XXXXXXX XX XXXX XX XXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXX XXX XXXXXXX
XXXXXXXX XXXX XXX XXX XXX XXXXXXXX XXX XXXXX XX XXXXXXXXXXX XXXXXXX XXXXXXX
XXXXXX XXXX XXX X XXXXX XXXX XX XXXXX XXXXX XX XXX XXXXXXXXX XXXX XXXXXX XXXX
XXXXX XXXX XXXX XXXXX XXXXXXX XXXXXX XXXX XXX XXX XXXXXXXXXXXXXXXXXXXXX XXXXX
XXXXX XXXXX XX XXX XXXXX XXXXXXXXX XX XXX XXXXXXXXXXXXX
XXX XXX XXXX X XXXX XX XXX XXXX XX XXX XXXXXXXX XXXX XXX XXXXXX XX
XXXXXXXX XX XXX XXXXXXX XX XXXX XXXXXXXXX XXX XXXXXX XX XXXX XX XX XXXX XX XXXX
XX XXXX XXXX XXX XX XXXX XX XXX XXXXXXXXXX XXXX XXX XXXXXX XX XXX XXXXXXX
XXXXXXXXXXXX XXXX XX XXXXXXXX XXXX XXXX XXXXX XX XXXXXXXX XXXXXX XXXXXXX XXXX
XXXXX XXXXXX XXXXX XX XX XXX XX X XXXXXX XXXXXXX XXXX XXX XXX XXXXXXXX XX XXXXX
XXX XXXXXXX XXXX XXX XXXXX
XXXXXXX XXX XXX XXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XXX XXXXX XXXXXXX XX XXXX XXXXXXX XXXX XXXXXXXXXX XXXXXXXXX XXXXX XXX XXXX
XXXXXXXX XXX X XXXXXX XX XXXXXXX XXX XXXXXXXXXXXXXXX XXXXXX XX XXXXXXXXXXXX
XXXX XXXXXX XXXXXXX X XXXXXX XX XXXXXXXXXXX XX XXX XXX XXXXXX XXXX XXX XX XX
XXXXXXX XXXX XXXX X XXXXXXXXXXX XX XXXXXXXXX XXXX XX XXX XXXXX XXXX XX XXXXXXX
XXXXX XX XXX XXXXXXXXXXX XXXXXXXX XXXX XXX XXXXXXXX XX XXXXX XXXX XXXXXXXX
XXXXXXXX XXXX XXXX XXXXX XX XXXXXXXXX XXX XXXXXXX XXXX XXXXXX XXXXXX XXXXXXXXX
XXXXXX XXX XXXXXXX
XXXXX XXXX XXXXX XXXXXXX XXXX XXXXX XX XXXX XXXXXXX XX X XXXXXXXX XXXXXXXX XX
XXXXXXXXXX XXXXX XXXX XXXXXXXXX XXXXXXXXXXXX XX XXXXX XXXXXXXXX XXXXXXXXXXXXXX
XXXXX XX XXXXXXX XXXXXXXXXXX XXXXXXX XX XXXXX
XXX XXX XXXXXXXX XXX XXXXXXX XXXXXXXXXX XX XXXX XXXXXXXX
X XXXXXX XXX XXXX X XXXX XXXXXXXXXX XXX XXXXXXX XXXXXXXXX XXXXXXXXX XXX
XXXXXXXXXX XXXXXXX XXXX XXX XXXXXXXXX
X XXXXXXXXXXXX XXXXXXX XXXXXXXXX XXX XXXXXXXXX XXX XXXXXX XX XXX XXXXXX
XXXXXXXXX XXXX XXXX XXXXXXX XXX XXXXXXXXXXX XXXXXXXXXX XXX XXXXXXXXX
XXXXXXXXXXX XX XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
X XXXX XXXXXXXX XXXXXXXXXXXXX XXXX XX XXX XXXXXXXXX XXXXX XXX XXXXXXXXX
XXXXX XXXX XXXX XXXXXXXXX XXX XXXXX XXXX XXXXXXXXXXXXXXXXXXX XX XXX XXXXX
XXXX XX XXX XXXXX XXXXXXXXXXXXX XXX XX XXXXXX XXXX XX XXXXXXX XXXXX
XXXXXXXX XXXXXXX
X XXXXXX XXX XXX XXXXXXX XXX XXXXXXXX XXXXXXXX XXXXXXXXXXXXX
X XXXXX XXXXX XXX XXXXXXX XX XXXXX XXXXXX XXXXXXXXXXXXXX XXX XXXXXXXXXXXXX
XXXXXXXX XXX XXXXXXXXXXXXXX XXXXX XXXXXXX XXXXXXXXX XXXXXXXX XXXX XX
XXXXX
X XXXXX XXXX XX XXXXXX XX XXX XXXXXX XXXXXXXXXXX XXXXXXXXX XX XXXXXXX
XXXXXXX X XXX XXXXXXXXX XXXX
X XXXX XXX XXXXXXXX XX XXX XXXXXX XXXXXXX X XXXXXXXXX XXXX XXXXXX XXXXX
XXXX XXX XXXXXXXXX XX XXXXXX XXXXXXXX XXX XX XXXX X XXXXXXX XXXXXXXX XXX
XX XXXX XX XXXXX XXXXXXX XXXXXX XX XXXXX XXXXXX XXX XXX XXXX XXX
XXXXXXXXXXXXXX XX XXXXX XXXX XXXXXXXXXXX XXX XXXXXXXXX XXXXXXXX XXXX X
XXXXXXXX XX XXXXXXXXXXX XXXXXXXXXX XX XXXXXXX XXX XXXXXXXX XXXX XXX
X XXXX XXX XXXX XXXXXXXX XXX XXXXXXX XX XXXXXXXX XXXXXX XXX XXXXXXXX
XXXXXXX XX XXXXXXX XXXXXX XXXX XXXXXXX XX XXXX XXXX XXX XXXXXXXXXX XXX
XXXXXXXXXXX XXXXXX XXXXXX
X XXXXXXXXXX XXXXXX XX XXX XXXXXXXXX XXX XXX XX XXXXXXXXXX XXXXXXX
XXXXXXXXX XXX XXXXXXX XX XX XXXXXXXXXXX XXXXXXXXX XXXX XXXXXXX XXX XXX
XXX XXXXXXXX XXX XXXXXX XXXXXXXXXX XXXXXX XXXXXX XXXXX XXXXXXXXXXXXX
X XXXX XXX XXXX XXXXX XX XXX XXXXXXXXX XXXX XXXX XXXXXXXXXXXXXXXXXX XX
XXXXX XXXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXX XXX XXXXXX
XXXXXXXXX XXXXXXXXX XXXX XXX XXXXXXXXXX XXX XXX XXXX XXXXXXXXXXX XX XXXXX
XX XXXXX XXXX XX XXXXXXXXXX XXXX XXXXXX XX XXXXXXX XX XXXXXXXXX XX XXXXXX
XX XX XXX XXXXXXXX XX XXX XXXXXXXX XXXXX XXXX XX XXXX XXXXXX XXXXXXXXX
XXX XXXXXX XX XXXXXXX XXXXXXXX XX XXXX XXXX XXXXXXXXXXXXXXX XXXX XX XXX XXXX
XXXX XXXX XXX XXXXXXXXXXX XX XXXX XXXXXX XXXXXXXX XXXX XXXX XXX XXX XXXX XXXXX
XXXXXXXXXX XXXXXXX XXX XXXX XXXX XX XX XXXX XXXXX X XXXX XX XXX XXXXXXXXX
XXXXXXX XX XXXXXXXXX XX XXX XXXXXXXXX XXX XXXXXXX XXXX XXXXX XXXXX XX XXXX XX
XXXX XXXXXXXXXX XXX XXXXXXXXX XXXX XXXXXXXXXXX XXX XXXXXXX XXXXXXXXXX
XX XXXXXXXXX XXX XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXX XXX XXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXX XXXXXXXXX X XXXXXXX XXXX XXXXXXX XXX XXXXXXXXXXXXX XX XXX
XXXXXXXXXXXX XX XXXX XXXXXXXXX XXXXXXXXXXXX XXXXXXXXX XX XXX XXXXXXX XXXXXXXXXX
XXX XXXXXXXXX XXXXXXXXXXXXX XXXXXXXX XX XXXXXXXXXX XXXXXXXXXXXX XX XX XXXXXXXX
X XXXXXX XX XXXXXX XXXX XXXX XX XXXX XXX XXXXXX
XXX XXXX XXXXXXXXXXXX XXXXX XXX XXXXXXXXXXXXXXX XXXXXXX XXXX XX X XXXX XXXXXX
XXXXX XXXX XXXX XXXX XXXXX XXXXXXXXXXX XXX XXX XXXX XXX XXXXX XXX XXXX XX
XXXXXX XXXXXXXX XX XXXXXXXXX XXX XXXXXX XXX XXXXXXXX XXXXXX XXXXXXX XXXXXXX
XXXX XXXXXX XXXXXXXXX XXXXXX XXXX XXXX XXXXXXXXXXX XXX XXX XXXXXXXXXX XXXXXXX
XXX XXXXXXX XXXX XXXX XXXXXXXXX
XXXXXXXX XXX XXXXX XXX XXXXXX XXX XXXX XXXXXXXXX XXXXXXXX XXXXXXX XX XXXX
XXXXXXX X XXXXXXXXXXX XXXXXXX XX XXXXXXXXXXXXXXXX XXXX XX XXXXXXXXX XXXXXXXXX
XX XXXXXX XXXXX XXX XXXXXXXXXX XXXX XXXXXX XXX XXXXXX XXXXXXXX XXXXXX XXX
XXXXXXX XXXXXXXXX XX XXX XXXX XX XXX XXX XX XX XXXXX XX XXXX XX XXXXX
XX XXXXXXX XXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXX XXX XXXXX XXXXXXX
XXX XXXXXX XXXX
XXXX XXXX
| false | true |
f7fdd0cf28d58b12a407b6868232dd098064d4dd | 164 | py | Python | jp.atcoder/abc088/abc088_a/8710718.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc088/abc088_a/8710718.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc088/abc088_a/8710718.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
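# AtCoder ABC088 A ("Infinite Coins", per the repo path): N yen is payable
# with unlimited 500-yen coins plus A one-yen coins exactly when the
# remainder N mod 500 can be covered by the one-yen coins.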
def main():
n, a = map(int, sys.stdin.read().split())
print("Yes" if n % 500 <= a else "No")
if __name__ == "__main__":
main()
| 14.909091 | 46 | 0.512195 | import sys
def main():
n, a = map(int, sys.stdin.read().split())
print("Yes" if n % 500 <= a else "No")
if __name__ == "__main__":
main()
| true | true |
f7fdd1975d4756109c9cbe2a682b60c81f8ca70f | 5,950 | py | Python | kanga/forms.py | deptofdefense/kanga | 9c8d926a4828e2fca528915ddf35759d1c328c85 | [
"MIT"
] | 1 | 2022-03-05T01:17:59.000Z | 2022-03-05T01:17:59.000Z | kanga/forms.py | deptofdefense/kanga | 9c8d926a4828e2fca528915ddf35759d1c328c85 | [
"MIT"
] | null | null | null | kanga/forms.py | deptofdefense/kanga | 9c8d926a4828e2fca528915ddf35759d1c328c85 | [
"MIT"
] | null | null | null | # =================================================================
#
# Work of the U.S. Department of Defense, Defense Digital Service.
# Released as open source under the MIT License. See LICENSE file.
#
# =================================================================
from django import forms
from kanga import settings
from kanga.models import Group, Origin, Template
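# Plain (non-Model) Django forms for the kanga views: accounts, assets,
# groups, plans, targets and templates. The plan forms generate fields
# dynamically, so they are built per request to track the current
# database rows.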
class NewAccountForm(forms.Form):
name = forms.CharField(max_length=255)
sid = forms.CharField(max_length=255)
auth_token = forms.CharField(max_length=255)
active = forms.CharField(max_length=255)
class EditAccountForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
sid = forms.CharField(max_length=255)
auth_token = forms.CharField(max_length=255)
active = forms.CharField(max_length=255)
class PlanAccountForm(forms.Form):
pass
class AssetForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
file = forms.FileField()
class NewGroupForm(forms.Form):
name = forms.CharField(max_length=255)
class EditGroupForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
class NewPlanForm(forms.Form):
name = forms.CharField(max_length=255)
account = forms.CharField(max_length=255)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
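        # Generate one optional CharField per available platform and per
        # Group/Origin/Template row currently in the database; the posted
        # value is expected to echo back the object's id/uuid (see clean()).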
for p in settings.AVAILABLE_PLATFORMS:
self.fields["platform-{}".format(p["id"])] = forms.CharField(max_length=255, required=False)
for g in Group.objects.all():
self.fields["group-{}".format(g.id)] = forms.CharField(max_length=255, required=False)
for o in Origin.objects.all():
self.fields["origin-{}".format(o.id)] = forms.CharField(max_length=255, required=False)
for t in Template.objects.all():
self.fields["template-{}".format(t.id)] = forms.CharField(max_length=255, required=False)
def clean(self):
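        # Resolve the dynamic fields back into model instances: a field
        # counts as selected when its submitted value matches the platform
        # id or the row's uuid, and the matches are collected into sets on
        # cleaned_data.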
platforms = set()
for p in settings.AVAILABLE_PLATFORMS:
if self.cleaned_data.get("platform-{}".format(p["id"])) == p["id"]:
platforms.add(p["id"])
self.cleaned_data["platforms"] = platforms
groups = set()
for g in Group.objects.all():
if self.cleaned_data.get("group-{}".format(g.id)) == str(g.uuid):
groups.add(g)
self.cleaned_data["groups"] = groups
origins = set()
for o in Origin.objects.all():
if self.cleaned_data.get("origin-{}".format(o.id)) == str(o.uuid):
origins.add(o)
self.cleaned_data["origins"] = origins
templates = set()
for t in Template.objects.all():
if self.cleaned_data.get("template-{}".format(t.id)) == str(t.uuid):
templates.add(t)
self.cleaned_data["templates"] = templates
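# EditPlanForm mirrors NewPlanForm (same dynamically generated fields and
# the same clean() logic); the only additions are a uuid field identifying
# the plan being edited and a trailing debug print of cleaned_data.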
class EditPlanForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
account = forms.CharField(max_length=255)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for p in settings.AVAILABLE_PLATFORMS:
self.fields["platform-{}".format(p["id"])] = forms.CharField(max_length=255, required=False)
for g in Group.objects.all():
self.fields["group-{}".format(g.id)] = forms.CharField(max_length=255, required=False)
for o in Origin.objects.all():
self.fields["origin-{}".format(o.id)] = forms.CharField(max_length=255, required=False)
for t in Template.objects.all():
self.fields["template-{}".format(t.id)] = forms.CharField(max_length=255, required=False)
def clean(self):
platforms = set()
for p in settings.AVAILABLE_PLATFORMS:
if self.cleaned_data.get("platform-{}".format(p["id"])) == p["id"]:
platforms.add(p["id"])
self.cleaned_data["platforms"] = platforms
groups = set()
for g in Group.objects.all():
if self.cleaned_data.get("group-{}".format(g.id)) == str(g.uuid):
groups.add(g)
self.cleaned_data["groups"] = groups
origins = set()
for o in Origin.objects.all():
if self.cleaned_data.get("origin-{}".format(o.id)) == str(o.uuid):
origins.add(o)
self.cleaned_data["origins"] = origins
templates = set()
for t in Template.objects.all():
if self.cleaned_data.get("template-{}".format(t.id)) == str(t.uuid):
templates.add(t)
self.cleaned_data["templates"] = templates
print(self.cleaned_data)
class NewTargetForm(forms.Form):
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
group = forms.CharField(max_length=36)
phone_number = forms.CharField(max_length=20)
class EditTargetForm(forms.Form):
uuid = forms.CharField(max_length=36)
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
group = forms.CharField(max_length=36)
phone_number = forms.CharField(max_length=20)
class ImportTargetsForm(forms.Form):
group = forms.CharField(max_length=255)
file_format = forms.CharField(max_length=255)
sheet_name = forms.CharField(max_length=255)
first_name_field = forms.CharField(max_length=255)
last_name_field = forms.CharField(max_length=255)
phone_number_field = forms.CharField(max_length=255)
file = forms.FileField()
class NewTemplateForm(forms.Form):
name = forms.CharField(max_length=255)
body = forms.CharField()
class EditTemplateForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
body = forms.CharField()
class DeleteObjectForm(forms.Form):
uuid = forms.CharField(max_length=36)
| 34.593023 | 104 | 0.635294 |
from django import forms
from kanga import settings
from kanga.models import Group, Origin, Template
class NewAccountForm(forms.Form):
name = forms.CharField(max_length=255)
sid = forms.CharField(max_length=255)
auth_token = forms.CharField(max_length=255)
active = forms.CharField(max_length=255)
class EditAccountForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
sid = forms.CharField(max_length=255)
auth_token = forms.CharField(max_length=255)
active = forms.CharField(max_length=255)
class PlanAccountForm(forms.Form):
pass
class AssetForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
file = forms.FileField()
class NewGroupForm(forms.Form):
name = forms.CharField(max_length=255)
class EditGroupForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
class NewPlanForm(forms.Form):
name = forms.CharField(max_length=255)
account = forms.CharField(max_length=255)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for p in settings.AVAILABLE_PLATFORMS:
self.fields["platform-{}".format(p["id"])] = forms.CharField(max_length=255, required=False)
for g in Group.objects.all():
self.fields["group-{}".format(g.id)] = forms.CharField(max_length=255, required=False)
for o in Origin.objects.all():
self.fields["origin-{}".format(o.id)] = forms.CharField(max_length=255, required=False)
for t in Template.objects.all():
self.fields["template-{}".format(t.id)] = forms.CharField(max_length=255, required=False)
def clean(self):
platforms = set()
for p in settings.AVAILABLE_PLATFORMS:
if self.cleaned_data.get("platform-{}".format(p["id"])) == p["id"]:
platforms.add(p["id"])
self.cleaned_data["platforms"] = platforms
groups = set()
for g in Group.objects.all():
if self.cleaned_data.get("group-{}".format(g.id)) == str(g.uuid):
groups.add(g)
self.cleaned_data["groups"] = groups
origins = set()
for o in Origin.objects.all():
if self.cleaned_data.get("origin-{}".format(o.id)) == str(o.uuid):
origins.add(o)
self.cleaned_data["origins"] = origins
templates = set()
for t in Template.objects.all():
if self.cleaned_data.get("template-{}".format(t.id)) == str(t.uuid):
templates.add(t)
self.cleaned_data["templates"] = templates
class EditPlanForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
account = forms.CharField(max_length=255)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for p in settings.AVAILABLE_PLATFORMS:
self.fields["platform-{}".format(p["id"])] = forms.CharField(max_length=255, required=False)
for g in Group.objects.all():
self.fields["group-{}".format(g.id)] = forms.CharField(max_length=255, required=False)
for o in Origin.objects.all():
self.fields["origin-{}".format(o.id)] = forms.CharField(max_length=255, required=False)
for t in Template.objects.all():
self.fields["template-{}".format(t.id)] = forms.CharField(max_length=255, required=False)
def clean(self):
platforms = set()
for p in settings.AVAILABLE_PLATFORMS:
if self.cleaned_data.get("platform-{}".format(p["id"])) == p["id"]:
platforms.add(p["id"])
self.cleaned_data["platforms"] = platforms
groups = set()
for g in Group.objects.all():
if self.cleaned_data.get("group-{}".format(g.id)) == str(g.uuid):
groups.add(g)
self.cleaned_data["groups"] = groups
origins = set()
for o in Origin.objects.all():
if self.cleaned_data.get("origin-{}".format(o.id)) == str(o.uuid):
origins.add(o)
self.cleaned_data["origins"] = origins
templates = set()
for t in Template.objects.all():
if self.cleaned_data.get("template-{}".format(t.id)) == str(t.uuid):
templates.add(t)
self.cleaned_data["templates"] = templates
print(self.cleaned_data)
class NewTargetForm(forms.Form):
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
group = forms.CharField(max_length=36)
phone_number = forms.CharField(max_length=20)
class EditTargetForm(forms.Form):
uuid = forms.CharField(max_length=36)
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
group = forms.CharField(max_length=36)
phone_number = forms.CharField(max_length=20)
class ImportTargetsForm(forms.Form):
group = forms.CharField(max_length=255)
file_format = forms.CharField(max_length=255)
sheet_name = forms.CharField(max_length=255)
first_name_field = forms.CharField(max_length=255)
last_name_field = forms.CharField(max_length=255)
phone_number_field = forms.CharField(max_length=255)
file = forms.FileField()
class NewTemplateForm(forms.Form):
name = forms.CharField(max_length=255)
body = forms.CharField()
class EditTemplateForm(forms.Form):
uuid = forms.CharField(max_length=36)
name = forms.CharField(max_length=255)
body = forms.CharField()
class DeleteObjectForm(forms.Form):
uuid = forms.CharField(max_length=36)
| true | true |
f7fdd2db2d51ec9818fe7ce7742cef0c58691de1 | 2,496 | py | Python | ietf/secr/proceedings/urls.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2015-09-02T19:53:12.000Z | 2015-09-02T19:53:12.000Z | ietf/secr/proceedings/urls.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ietf/secr/proceedings/urls.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, url
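# Legacy URLconf style: patterns() with dotted view-name strings was
# deprecated in Django 1.8 and removed in Django 1.10.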
urlpatterns = patterns('ietf.secr.proceedings.views',
url(r'^$', 'main', name='proceedings'),
url(r'^ajax/generate-proceedings/(?P<meeting_num>\d{1,3})/$', 'ajax_generate_proceedings', name='proceedings_ajax_generate_proceedings'),
url(r'^ajax/get-sessions/(?P<meeting_num>\d{1,3})/(?P<acronym>[A-Za-z0-9_\-\+]+)/', 'ajax_get_sessions', name='proceedings_ajax_get_sessions'),
url(r'^ajax/order-slide/$', 'ajax_order_slide', name='proceedings_ajax_order_slide'),
# special offline URL for testing proceedings build
url(r'^build/(?P<meeting_num>\d{1,3}|interim-\d{4}-[A-Za-z0-9_\-\+]+)/(?P<acronym>[A-Za-z0-9_\-\+]+)/$',
'build', name='proceedings_build'),
url(r'^delete/(?P<slide_id>[A-Za-z0-9._\-\+]+)/$', 'delete_material', name='proceedings_delete_material'),
url(r'^edit-slide/(?P<slide_id>[A-Za-z0-9._\-\+]+)/$', 'edit_slide', name='proceedings_edit_slide'),
url(r'^move-slide/(?P<slide_id>[A-Za-z0-9._\-\+]+)/(?P<direction>(up|down))/$',
'move_slide', name='proceedings_move_slide'),
url(r'^process-pdfs/(?P<meeting_num>\d{1,3})/$', 'process_pdfs', name='proceedings_process_pdfs'),
url(r'^progress-report/(?P<meeting_num>\d{1,3})/$', 'progress_report', name='proceedings_progress_report'),
url(r'^replace-slide/(?P<slide_id>[A-Za-z0-9._\-\+]+)/$', 'replace_slide', name='proceedings_replace_slide'),
url(r'^(?P<meeting_num>\d{1,3})/$', 'select', name='proceedings_select'),
url(r'^(?P<meeting_num>\d{1,3})/recording/$', 'recording', name='proceedings_recording'),
url(r'^(?P<meeting_num>\d{1,3})/recording/edit/(?P<name>[A-Za-z0-9_\-\+]+)$', 'recording_edit', name='proceedings_recording_edit'),
# NOTE: we have two entries here which both map to upload_unified, passing session_id or acronym
url(r'^(?P<meeting_num>\d{1,3}|interim-\d{4}-[A-Za-z0-9_\-\+]+)/(?P<session_id>\d{1,6})/$',
'upload_unified', name='proceedings_upload_unified'),
url(r'^(?P<meeting_num>\d{1,3}|interim-\d{4}-[A-Za-z0-9_\-\+]+)/(?P<acronym>[A-Za-z0-9_\-\+]+)/$',
'upload_unified', name='proceedings_upload_unified'),
    # interim meeting URLs
url(r'^interim/$', 'select_interim', name='proceedings_select_interim'),
url(r'^interim/(?P<meeting_num>interim-\d{4}-[A-Za-z0-9_\-\+]+)/delete/$', 'delete_interim_meeting',
name='proceedings_delete_interim_meeting'),
url(r'^interim/(?P<acronym>[A-Za-z0-9_\-\+]+)/$', 'interim', name='proceedings_interim'),
)
| 78 | 147 | 0.652644 | from django.conf.urls import patterns, url
urlpatterns = patterns('ietf.secr.proceedings.views',
url(r'^$', 'main', name='proceedings'),
url(r'^ajax/generate-proceedings/(?P<meeting_num>\d{1,3})/$', 'ajax_generate_proceedings', name='proceedings_ajax_generate_proceedings'),
url(r'^ajax/get-sessions/(?P<meeting_num>\d{1,3})/(?P<acronym>[A-Za-z0-9_\-\+]+)/', 'ajax_get_sessions', name='proceedings_ajax_get_sessions'),
url(r'^ajax/order-slide/$', 'ajax_order_slide', name='proceedings_ajax_order_slide'),
url(r'^build/(?P<meeting_num>\d{1,3}|interim-\d{4}-[A-Za-z0-9_\-\+]+)/(?P<acronym>[A-Za-z0-9_\-\+]+)/$',
'build', name='proceedings_build'),
url(r'^delete/(?P<slide_id>[A-Za-z0-9._\-\+]+)/$', 'delete_material', name='proceedings_delete_material'),
url(r'^edit-slide/(?P<slide_id>[A-Za-z0-9._\-\+]+)/$', 'edit_slide', name='proceedings_edit_slide'),
url(r'^move-slide/(?P<slide_id>[A-Za-z0-9._\-\+]+)/(?P<direction>(up|down))/$',
'move_slide', name='proceedings_move_slide'),
url(r'^process-pdfs/(?P<meeting_num>\d{1,3})/$', 'process_pdfs', name='proceedings_process_pdfs'),
url(r'^progress-report/(?P<meeting_num>\d{1,3})/$', 'progress_report', name='proceedings_progress_report'),
url(r'^replace-slide/(?P<slide_id>[A-Za-z0-9._\-\+]+)/$', 'replace_slide', name='proceedings_replace_slide'),
url(r'^(?P<meeting_num>\d{1,3})/$', 'select', name='proceedings_select'),
url(r'^(?P<meeting_num>\d{1,3})/recording/$', 'recording', name='proceedings_recording'),
url(r'^(?P<meeting_num>\d{1,3})/recording/edit/(?P<name>[A-Za-z0-9_\-\+]+)$', 'recording_edit', name='proceedings_recording_edit'),
url(r'^(?P<meeting_num>\d{1,3}|interim-\d{4}-[A-Za-z0-9_\-\+]+)/(?P<session_id>\d{1,6})/$',
'upload_unified', name='proceedings_upload_unified'),
url(r'^(?P<meeting_num>\d{1,3}|interim-\d{4}-[A-Za-z0-9_\-\+]+)/(?P<acronym>[A-Za-z0-9_\-\+]+)/$',
'upload_unified', name='proceedings_upload_unified'),
url(r'^interim/$', 'select_interim', name='proceedings_select_interim'),
url(r'^interim/(?P<meeting_num>interim-\d{4}-[A-Za-z0-9_\-\+]+)/delete/$', 'delete_interim_meeting',
name='proceedings_delete_interim_meeting'),
url(r'^interim/(?P<acronym>[A-Za-z0-9_\-\+]+)/$', 'interim', name='proceedings_interim'),
)
| true | true |
f7fdd357552cff30f0e88b40e5b0f1d75ab20fd9 | 781 | py | Python | django_import_data/migrations/0011_auto_20190705_1351.py | GreenBankObservatory/django-import-data | 80b75f5a1a750c75c1d9f6c759a357cf600d4a5e | [
"MIT"
] | 1 | 2021-09-22T14:37:41.000Z | 2021-09-22T14:37:41.000Z | django_import_data/migrations/0011_auto_20190705_1351.py | GreenBankObservatory/django-import-data | 80b75f5a1a750c75c1d9f6c759a357cf600d4a5e | [
"MIT"
] | null | null | null | django_import_data/migrations/0011_auto_20190705_1351.py | GreenBankObservatory/django-import-data | 80b75f5a1a750c75c1d9f6c759a357cf600d4a5e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-07-05 17:51
from django.db import migrations, models
import django.db.models.deletion
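# Moves the FileImportAttempt foreign key from ModelImportAttempt onto
# ModelImporter, keeping the related_name "model_import_attempts".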
class Migration(migrations.Migration):
dependencies = [
('django_import_data', '0010_auto_20190705_1323'),
]
operations = [
migrations.RemoveField(
model_name='modelimportattempt',
name='file_import_attempt',
),
migrations.AddField(
model_name='modelimporter',
name='file_import_attempt',
field=models.ForeignKey(blank=True, help_text='Reference to the FileImportAttempt this was created from', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='model_import_attempts', to='django_import_data.FileImportAttempt'),
),
]
| 32.541667 | 255 | 0.681178 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_import_data', '0010_auto_20190705_1323'),
]
operations = [
migrations.RemoveField(
model_name='modelimportattempt',
name='file_import_attempt',
),
migrations.AddField(
model_name='modelimporter',
name='file_import_attempt',
field=models.ForeignKey(blank=True, help_text='Reference to the FileImportAttempt this was created from', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='model_import_attempts', to='django_import_data.FileImportAttempt'),
),
]
| true | true |
f7fdd4f9975efb6ebc2d2a7ed003922a73542a88 | 575 | py | Python | todo_list/migrations/0003_auto_20181104_1142.py | zzerjae/TODO-List | 7410bd99385b22098ca842396634329e47c73a29 | [
"MIT"
] | null | null | null | todo_list/migrations/0003_auto_20181104_1142.py | zzerjae/TODO-List | 7410bd99385b22098ca842396634329e47c73a29 | [
"MIT"
] | 1 | 2019-05-23T02:38:57.000Z | 2019-07-03T15:08:42.000Z | todo_list/migrations/0003_auto_20181104_1142.py | zzerjae/TODO-List | 7410bd99385b22098ca842396634329e47c73a29 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2018-11-04 02:42
from django.db import migrations, models
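# Alters Todo.priority to PositiveIntegerField and Todo.title to
# CharField(100); the Korean verbose_names mean "priority" (우선순위)
# and "title" (제목).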
class Migration(migrations.Migration):
dependencies = [
('todo_list', '0002_auto_20181101_2325'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='priority',
field=models.PositiveIntegerField(verbose_name='우선순위'),
),
migrations.AlterField(
model_name='todo',
name='title',
field=models.CharField(max_length=100, verbose_name='제목'),
),
]
| 23.958333 | 70 | 0.587826 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo_list', '0002_auto_20181101_2325'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='priority',
field=models.PositiveIntegerField(verbose_name='우선순위'),
),
migrations.AlterField(
model_name='todo',
name='title',
field=models.CharField(max_length=100, verbose_name='제목'),
),
]
| true | true |
f7fdd504f92b09c803f1c972adb486e08c18b940 | 29,200 | py | Python | pymysqlreplication/tests/test_data_type.py | marching-cube/python-mysql-replication | 54e84afe0ffdc02ddc69653c6ecba5e862c808f5 | [
"Apache-2.0"
] | null | null | null | pymysqlreplication/tests/test_data_type.py | marching-cube/python-mysql-replication | 54e84afe0ffdc02ddc69653c6ecba5e862c808f5 | [
"Apache-2.0"
] | null | null | null | pymysqlreplication/tests/test_data_type.py | marching-cube/python-mysql-replication | 54e84afe0ffdc02ddc69653c6ecba5e862c808f5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import copy
import platform
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from decimal import Decimal
from pymysqlreplication.tests import base
from pymysqlreplication.constants.BINLOG import *
from pymysqlreplication.row_event import *
from pymysqlreplication.event import *
from pymysqlreplication._compat import text_type
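# Note: json and datetime are never imported directly; the tests below rely
# on them arriving through the wildcard imports above.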
__all__ = ["TestDataType"]
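# The binlog JSON decoder returns bytes for strings (see the MySQL 5.7 JSON
# tests below), so expected fixtures are recursively byte-encoded first.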
def to_binary_dict(d):
def encode_value(v):
if isinstance(v, text_type):
return v.encode()
if isinstance(v, list):
return [encode_value(x) for x in v]
return v
return dict([(k.encode(), encode_value(v)) for (k, v) in d.items()])
class TestDataType(base.PyMySQLReplicationTestCase):
def ignoredEvents(self):
return [GtidEvent]
def create_and_insert_value(self, create_query, insert_query):
self.execute(create_query)
self.execute(insert_query)
self.execute("COMMIT")
self.assertIsInstance(self.stream.fetchone(), RotateEvent)
self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
        # QueryEvent for the Create Table
        self.assertIsInstance(self.stream.fetchone(), QueryEvent)
        # QueryEvent for the BEGIN
        self.assertIsInstance(self.stream.fetchone(), QueryEvent)
self.assertIsInstance(self.stream.fetchone(), TableMapEvent)
event = self.stream.fetchone()
if self.isMySQL56AndMore():
self.assertEqual(event.event_type, WRITE_ROWS_EVENT_V2)
else:
self.assertEqual(event.event_type, WRITE_ROWS_EVENT_V1)
self.assertIsInstance(event, WriteRowsEvent)
return event
def test_decimal(self):
create_query = "CREATE TABLE test (test DECIMAL(2,1))"
insert_query = "INSERT INTO test VALUES(4.2)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.columns[0].precision, 2)
self.assertEqual(event.columns[0].decimals, 1)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("4.2"))
def test_decimal_long_values(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(42000.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("42000.123456"))
def test_decimal_long_values_1(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(9000000123.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("9000000123.123456"))
def test_decimal_long_values_2(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(9000000123.0000012345)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"],
Decimal("9000000123.0000012345"))
def test_decimal_negative_values(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(-42000.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("-42000.123456"))
def test_decimal_two_values(self):
create_query = "CREATE TABLE test (\
test DECIMAL(2,1), \
test2 DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(4.2, 42000.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("4.2"))
self.assertEqual(event.rows[0]["values"]["test2"], Decimal("42000.123456"))
def test_decimal_with_zero_scale_1(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(10)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("10"))
def test_decimal_with_zero_scale_2(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(12345678912345678912345)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("12345678912345678912345"))
def test_decimal_with_zero_scale_3(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(100000.0)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("100000"))
def test_decimal_with_zero_scale_4(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(-100000.0)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("-100000"))
def test_decimal_with_zero_scale_6(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(-1234567891234567891234)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("-1234567891234567891234"))
def test_tiny(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test TINYINT)"
insert_query = "INSERT INTO test VALUES(255, -128)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 255)
self.assertEqual(event.rows[0]["values"]["test"], -128)
def test_tiny_maps_to_boolean_true(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test BOOLEAN)"
insert_query = "INSERT INTO test VALUES(1, TRUE)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 1)
self.assertEqual(type(event.rows[0]["values"]["test"]), type(1))
self.assertEqual(event.rows[0]["values"]["test"], 1)
def test_tiny_maps_to_boolean_false(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test BOOLEAN)"
insert_query = "INSERT INTO test VALUES(1, FALSE)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 1)
self.assertEqual(type(event.rows[0]["values"]["test"]), type(0))
self.assertEqual(event.rows[0]["values"]["test"], 0)
def test_tiny_maps_to_none(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test BOOLEAN)"
insert_query = "INSERT INTO test VALUES(1, NULL)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 1)
self.assertEqual(type(event.rows[0]["values"]["test"]), type(None))
self.assertEqual(event.rows[0]["values"]["test"], None)
def test_short(self):
create_query = "CREATE TABLE test (id SMALLINT UNSIGNED NOT NULL, test SMALLINT)"
insert_query = "INSERT INTO test VALUES(65535, -32768)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 65535)
self.assertEqual(event.rows[0]["values"]["test"], -32768)
def test_long(self):
create_query = "CREATE TABLE test (id INT UNSIGNED NOT NULL, test INT)"
insert_query = "INSERT INTO test VALUES(4294967295, -2147483648)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 4294967295)
self.assertEqual(event.rows[0]["values"]["test"], -2147483648)
def test_float(self):
create_query = "CREATE TABLE test (id FLOAT NOT NULL, test FLOAT)"
insert_query = "INSERT INTO test VALUES(42.42, -84.84)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(round(event.rows[0]["values"]["id"], 2), 42.42)
self.assertEqual(round(event.rows[0]["values"]["test"],2 ), -84.84)
def test_double(self):
create_query = "CREATE TABLE test (id DOUBLE NOT NULL, test DOUBLE)"
insert_query = "INSERT INTO test VALUES(42.42, -84.84)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(round(event.rows[0]["values"]["id"], 2), 42.42)
self.assertEqual(round(event.rows[0]["values"]["test"],2 ), -84.84)
def test_timestamp(self):
create_query = "CREATE TABLE test (test TIMESTAMP);"
insert_query = "INSERT INTO test VALUES('1984-12-03 12:33:07')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.datetime(1984, 12, 3, 12, 33, 7))
def test_timestamp_mysql56(self):
if not self.isMySQL56AndMore():
self.skipTest("Not supported in this version of MySQL")
self.set_sql_mode()
create_query = '''CREATE TABLE test (test0 TIMESTAMP(0),
test1 TIMESTAMP(1),
test2 TIMESTAMP(2),
test3 TIMESTAMP(3),
test4 TIMESTAMP(4),
test5 TIMESTAMP(5),
test6 TIMESTAMP(6));'''
insert_query = '''INSERT INTO test VALUES('1984-12-03 12:33:07',
'1984-12-03 12:33:07.1',
'1984-12-03 12:33:07.12',
'1984-12-03 12:33:07.123',
'1984-12-03 12:33:07.1234',
'1984-12-03 12:33:07.12345',
'1984-12-03 12:33:07.123456')'''
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test0"], datetime.datetime(1984, 12, 3, 12, 33, 7))
self.assertEqual(event.rows[0]["values"]["test1"], datetime.datetime(1984, 12, 3, 12, 33, 7, 100000))
self.assertEqual(event.rows[0]["values"]["test2"], datetime.datetime(1984, 12, 3, 12, 33, 7, 120000))
self.assertEqual(event.rows[0]["values"]["test3"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123000))
self.assertEqual(event.rows[0]["values"]["test4"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123400))
self.assertEqual(event.rows[0]["values"]["test5"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123450))
self.assertEqual(event.rows[0]["values"]["test6"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123456))
def test_longlong(self):
create_query = "CREATE TABLE test (id BIGINT UNSIGNED NOT NULL, test BIGINT)"
insert_query = "INSERT INTO test VALUES(18446744073709551615, -9223372036854775808)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 18446744073709551615)
self.assertEqual(event.rows[0]["values"]["test"], -9223372036854775808)
def test_int24(self):
create_query = "CREATE TABLE test (id MEDIUMINT UNSIGNED NOT NULL, test MEDIUMINT, test2 MEDIUMINT, test3 MEDIUMINT, test4 MEDIUMINT, test5 MEDIUMINT)"
insert_query = "INSERT INTO test VALUES(16777215, 8388607, -8388608, 8, -8, 0)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 16777215)
self.assertEqual(event.rows[0]["values"]["test"], 8388607)
self.assertEqual(event.rows[0]["values"]["test2"], -8388608)
self.assertEqual(event.rows[0]["values"]["test3"], 8)
self.assertEqual(event.rows[0]["values"]["test4"], -8)
self.assertEqual(event.rows[0]["values"]["test5"], 0)
def test_date(self):
create_query = "CREATE TABLE test (test DATE);"
insert_query = "INSERT INTO test VALUES('1984-12-03')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.date(1984, 12, 3))
def test_zero_date(self):
create_query = "CREATE TABLE test (id INTEGER, test DATE, test2 DATE);"
insert_query = "INSERT INTO test (id, test2) VALUES(1, '0000-01-21')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], None)
def test_zero_month(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (id INTEGER, test DATE, test2 DATE);"
insert_query = "INSERT INTO test (id, test2) VALUES(1, '2015-00-21')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], None)
def test_zero_day(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (id INTEGER, test DATE, test2 DATE);"
insert_query = "INSERT INTO test (id, test2) VALUES(1, '2015-05-00')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], None)
def test_time(self):
create_query = "CREATE TABLE test (test1 TIME, test2 TIME);"
insert_query = "INSERT INTO test VALUES('838:59:59', '-838:59:59')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test1"], datetime.timedelta(
microseconds=(((838*60) + 59)*60 + 59)*1000000
))
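        # Note: the expected negative value applies the minus sign only to
        # the hours term, mirroring how the library decodes a negative TIME
        # rather than computing -(838h 59m 59s).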
self.assertEqual(event.rows[0]["values"]["test2"], datetime.timedelta(
microseconds=(((-838*60) + 59)*60 + 59)*1000000
))
def test_time2(self):
if not self.isMySQL56AndMore():
self.skipTest("Not supported in this version of MySQL")
create_query = "CREATE TABLE test (test1 TIME(6), test2 TIME(6));"
insert_query = """
INSERT INTO test VALUES('838:59:59.000000', '-838:59:59.000000');
"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test1"], datetime.timedelta(
microseconds=(((838*60) + 59)*60 + 59)*1000000 + 0
))
self.assertEqual(event.rows[0]["values"]["test2"], datetime.timedelta(
microseconds=(((-838*60) + 59)*60 + 59)*1000000 + 0
))
def test_zero_time(self):
create_query = "CREATE TABLE test (id INTEGER, test TIME NOT NULL DEFAULT 0);"
insert_query = "INSERT INTO test (id) VALUES(1)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.timedelta(seconds=0))
def test_datetime(self):
create_query = "CREATE TABLE test (test DATETIME);"
insert_query = "INSERT INTO test VALUES('1984-12-03 12:33:07')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.datetime(1984, 12, 3, 12, 33, 7))
def test_zero_datetime(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (id INTEGER, test DATETIME NOT NULL DEFAULT 0);"
insert_query = "INSERT INTO test (id) VALUES(1)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
def test_broken_datetime(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (test DATETIME NOT NULL);"
insert_query = "INSERT INTO test VALUES('2013-00-00 00:00:00')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
def test_year(self):
if self.isMySQL57():
# https://dev.mysql.com/doc/refman/5.7/en/migrating-to-year4.html
self.skipTest("YEAR(2) is unsupported in mysql 5.7")
create_query = "CREATE TABLE test (a YEAR(4), b YEAR(2))"
insert_query = "INSERT INTO test VALUES(1984, 1984)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["a"], 1984)
self.assertEqual(event.rows[0]["values"]["b"], 1984)
def test_varchar(self):
create_query = "CREATE TABLE test (test VARCHAR(242)) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], 'Hello')
self.assertEqual(event.columns[0].max_length, 242)
def test_bit(self):
create_query = "CREATE TABLE test (test BIT(6), \
test2 BIT(16), \
test3 BIT(12), \
test4 BIT(9), \
test5 BIT(64) \
);"
insert_query = "INSERT INTO test VALUES( \
b'100010', \
b'1000101010111000', \
b'100010101101', \
b'101100111', \
b'1101011010110100100111100011010100010100101110111011101011011010')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.columns[0].bits, 6)
self.assertEqual(event.columns[1].bits, 16)
self.assertEqual(event.columns[2].bits, 12)
self.assertEqual(event.columns[3].bits, 9)
self.assertEqual(event.columns[4].bits, 64)
self.assertEqual(event.rows[0]["values"]["test"], "100010")
self.assertEqual(event.rows[0]["values"]["test2"], "1000101010111000")
self.assertEqual(event.rows[0]["values"]["test3"], "100010101101")
self.assertEqual(event.rows[0]["values"]["test4"], "101100111")
self.assertEqual(event.rows[0]["values"]["test5"], "1101011010110100100111100011010100010100101110111011101011011010")
def test_enum(self):
create_query = "CREATE TABLE test (test ENUM('a', 'ba', 'c'), test2 ENUM('a', 'ba', 'c')) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('ba', 'a')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], 'ba')
self.assertEqual(event.rows[0]["values"]["test2"], 'a')
def test_enum_empty_string(self):
create_query = "CREATE TABLE test (test ENUM('a', 'ba', 'c'), test2 ENUM('a', 'ba', 'c')) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('ba', 'asdf')"
last_sql_mode = self.execute("SELECT @@SESSION.sql_mode;"). \
fetchall()[0][0]
self.execute("SET SESSION sql_mode = 'ANSI';")
event = self.create_and_insert_value(create_query, insert_query)
self.execute("SET SESSION sql_mode = '%s';" % last_sql_mode)
self.assertEqual(event.rows[0]["values"]["test"], 'ba')
self.assertEqual(event.rows[0]["values"]["test2"], '')
def test_set(self):
create_query = "CREATE TABLE test (test SET('a', 'ba', 'c'), test2 SET('a', 'ba', 'c')) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('ba,a,c', 'a,c')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], set(('a', 'ba', 'c')))
self.assertEqual(event.rows[0]["values"]["test2"], set(('a', 'c')))
def test_tiny_blob(self):
create_query = "CREATE TABLE test (test TINYBLOB, test2 TINYTEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_medium_blob(self):
create_query = "CREATE TABLE test (test MEDIUMBLOB, test2 MEDIUMTEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_long_blob(self):
create_query = "CREATE TABLE test (test LONGBLOB, test2 LONGTEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_blob(self):
create_query = "CREATE TABLE test (test BLOB, test2 TEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_string(self):
create_query = "CREATE TABLE test (test CHAR(12)) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], 'Hello')
def test_geometry(self):
create_query = "CREATE TABLE test (test GEOMETRY);"
insert_query = "INSERT INTO test VALUES(GeomFromText('POINT(1 1)'))"
event = self.create_and_insert_value(create_query, insert_query)
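        # GeomFromText() is the pre-MySQL-5.7 alias of ST_GeomFromText()
        # (removed in MySQL 8.0). The expected bytes are a 4-byte SRID (0)
        # followed by little-endian WKB for POINT(1 1).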
self.assertEqual(event.rows[0]["values"]["test"], b'\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\xf0?')
def test_json(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '{"my_key": "my_val", "my_key2": "my_val2"}');"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], {b"my_key": b"my_val", b"my_key2": b"my_val2"})
def test_json_array(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '["my_val", "my_val2"]');"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], [b'my_val', b'my_val2'])
def test_json_large(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
        data = dict([('foooo%i' % i, 'baaaaar%i' % i) for i in range(2560)])  # large enough that the serialized document exceeds 2**16 bytes
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '%s');""" % json.dumps(data)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], to_binary_dict(data))
def test_json_types(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
types = [
True,
False,
None,
1.2,
2^14,
2^30,
2^62,
-1 * 2^14,
-1 * 2^30,
-1 * 2^62,
['foo', 'bar']
]
for t in types:
data = {'foo': t}
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '%s');""" % json.dumps(data)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], to_binary_dict(data))
self.tearDown()
self.setUp()
def test_json_basic(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
types = [
True,
False,
None,
1.2,
2^14,
2^30,
2^62,
-1 * 2^14,
-1 * 2^30,
-1 * 2^62,
]
for data in types:
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '%s');""" % json.dumps(data)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], data)
self.tearDown()
self.setUp()
def test_json_unicode(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
insert_query = u"""INSERT INTO test (id, value) VALUES (1, '{"miam": "🍔"}');"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"][b"miam"], u'🍔'.encode('utf8'))
def test_json_long_string(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
# The string length needs to be larger than what can fit in a single byte.
string_value = "super_long_string" * 100
insert_query = "INSERT INTO test (id, value) VALUES (1, '{\"my_key\": \"%s\"}');" % (string_value,)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], to_binary_dict({"my_key": string_value}))
def test_null(self):
create_query = "CREATE TABLE test ( \
test TINYINT NULL DEFAULT NULL, \
test2 TINYINT NULL DEFAULT NULL, \
test3 TINYINT NULL DEFAULT NULL, \
test4 TINYINT NULL DEFAULT NULL, \
test5 TINYINT NULL DEFAULT NULL, \
test6 TINYINT NULL DEFAULT NULL, \
test7 TINYINT NULL DEFAULT NULL, \
test8 TINYINT NULL DEFAULT NULL, \
test9 TINYINT NULL DEFAULT NULL, \
test10 TINYINT NULL DEFAULT NULL, \
test11 TINYINT NULL DEFAULT NULL, \
test12 TINYINT NULL DEFAULT NULL, \
test13 TINYINT NULL DEFAULT NULL, \
test14 TINYINT NULL DEFAULT NULL, \
test15 TINYINT NULL DEFAULT NULL, \
test16 TINYINT NULL DEFAULT NULL, \
test17 TINYINT NULL DEFAULT NULL, \
test18 TINYINT NULL DEFAULT NULL, \
test19 TINYINT NULL DEFAULT NULL, \
test20 TINYINT NULL DEFAULT NULL\
)"
insert_query = "INSERT INTO test (test, test2, test3, test7, test20) VALUES(NULL, -128, NULL, 42, 84)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], -128)
self.assertEqual(event.rows[0]["values"]["test3"], None)
self.assertEqual(event.rows[0]["values"]["test7"], 42)
self.assertEqual(event.rows[0]["values"]["test20"], 84)
def test_encoding_latin1(self):
db = copy.copy(self.database)
db["charset"] = "latin1"
self.connect_conn_control(db)
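        # U+00E9 ("é") is representable in latin-1; the control connection is
        # reopened with charset latin1 so the raw INSERT bytes are interpreted
        # as latin-1 by the server.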
if platform.python_version_tuple()[0] == "2":
string = unichr(233)
else:
string = "\u00e9"
create_query = "CREATE TABLE test (test CHAR(12)) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = b"INSERT INTO test VALUES('" + string.encode('latin-1') + b"');"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], string)
def test_encoding_utf8(self):
if platform.python_version_tuple()[0] == "2":
string = unichr(0x20ac)
else:
string = "\u20ac"
create_query = "CREATE TABLE test (test CHAR(12)) CHARACTER SET utf8 COLLATE utf8_bin;"
insert_query = b"INSERT INTO test VALUES('" + string.encode('utf-8') + b"')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertMultiLineEqual(event.rows[0]["values"]["test"], string)
if __name__ == "__main__":
unittest.main()
| 48.747913 | 159 | 0.63637 |
import copy
import platform
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from decimal import Decimal
from pymysqlreplication.tests import base
from pymysqlreplication.constants.BINLOG import *
from pymysqlreplication.row_event import *
from pymysqlreplication.event import *
from pymysqlreplication._compat import text_type
__all__ = ["TestDataType"]
def to_binary_dict(d):
def encode_value(v):
if isinstance(v, text_type):
return v.encode()
if isinstance(v, list):
return [encode_value(x) for x in v]
return v
return dict([(k.encode(), encode_value(v)) for (k, v) in d.items()])
class TestDataType(base.PyMySQLReplicationTestCase):
def ignoredEvents(self):
return [GtidEvent]
def create_and_insert_value(self, create_query, insert_query):
self.execute(create_query)
self.execute(insert_query)
self.execute("COMMIT")
self.assertIsInstance(self.stream.fetchone(), RotateEvent)
self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
self.assertIsInstance(self.stream.fetchone(), QueryEvent)
self.assertIsInstance(self.stream.fetchone(), QueryEvent)
self.assertIsInstance(self.stream.fetchone(), TableMapEvent)
event = self.stream.fetchone()
if self.isMySQL56AndMore():
self.assertEqual(event.event_type, WRITE_ROWS_EVENT_V2)
else:
self.assertEqual(event.event_type, WRITE_ROWS_EVENT_V1)
self.assertIsInstance(event, WriteRowsEvent)
return event
def test_decimal(self):
create_query = "CREATE TABLE test (test DECIMAL(2,1))"
insert_query = "INSERT INTO test VALUES(4.2)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.columns[0].precision, 2)
self.assertEqual(event.columns[0].decimals, 1)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("4.2"))
def test_decimal_long_values(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(42000.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("42000.123456"))
def test_decimal_long_values_1(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(9000000123.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("9000000123.123456"))
def test_decimal_long_values_2(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(9000000123.0000012345)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"],
Decimal("9000000123.0000012345"))
def test_decimal_negative_values(self):
create_query = "CREATE TABLE test (\
test DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(-42000.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("-42000.123456"))
def test_decimal_two_values(self):
create_query = "CREATE TABLE test (\
test DECIMAL(2,1), \
test2 DECIMAL(20,10) \
)"
insert_query = "INSERT INTO test VALUES(4.2, 42000.123456)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("4.2"))
self.assertEqual(event.rows[0]["values"]["test2"], Decimal("42000.123456"))
def test_decimal_with_zero_scale_1(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(10)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("10"))
def test_decimal_with_zero_scale_2(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(12345678912345678912345)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("12345678912345678912345"))
def test_decimal_with_zero_scale_3(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(100000.0)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("100000"))
def test_decimal_with_zero_scale_4(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(-100000.0)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("-100000"))
def test_decimal_with_zero_scale_6(self):
create_query = "CREATE TABLE test (test DECIMAL(23,0))"
insert_query = "INSERT INTO test VALUES(-1234567891234567891234)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], Decimal("-1234567891234567891234"))
def test_tiny(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test TINYINT)"
insert_query = "INSERT INTO test VALUES(255, -128)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 255)
self.assertEqual(event.rows[0]["values"]["test"], -128)
def test_tiny_maps_to_boolean_true(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test BOOLEAN)"
insert_query = "INSERT INTO test VALUES(1, TRUE)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 1)
self.assertEqual(type(event.rows[0]["values"]["test"]), type(1))
self.assertEqual(event.rows[0]["values"]["test"], 1)
def test_tiny_maps_to_boolean_false(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test BOOLEAN)"
insert_query = "INSERT INTO test VALUES(1, FALSE)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 1)
self.assertEqual(type(event.rows[0]["values"]["test"]), type(0))
self.assertEqual(event.rows[0]["values"]["test"], 0)
def test_tiny_maps_to_none(self):
create_query = "CREATE TABLE test (id TINYINT UNSIGNED NOT NULL, test BOOLEAN)"
insert_query = "INSERT INTO test VALUES(1, NULL)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 1)
self.assertEqual(type(event.rows[0]["values"]["test"]), type(None))
self.assertEqual(event.rows[0]["values"]["test"], None)
def test_short(self):
create_query = "CREATE TABLE test (id SMALLINT UNSIGNED NOT NULL, test SMALLINT)"
insert_query = "INSERT INTO test VALUES(65535, -32768)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 65535)
self.assertEqual(event.rows[0]["values"]["test"], -32768)
def test_long(self):
create_query = "CREATE TABLE test (id INT UNSIGNED NOT NULL, test INT)"
insert_query = "INSERT INTO test VALUES(4294967295, -2147483648)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 4294967295)
self.assertEqual(event.rows[0]["values"]["test"], -2147483648)
def test_float(self):
create_query = "CREATE TABLE test (id FLOAT NOT NULL, test FLOAT)"
insert_query = "INSERT INTO test VALUES(42.42, -84.84)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(round(event.rows[0]["values"]["id"], 2), 42.42)
        self.assertEqual(round(event.rows[0]["values"]["test"], 2), -84.84)
def test_double(self):
create_query = "CREATE TABLE test (id DOUBLE NOT NULL, test DOUBLE)"
insert_query = "INSERT INTO test VALUES(42.42, -84.84)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(round(event.rows[0]["values"]["id"], 2), 42.42)
        self.assertEqual(round(event.rows[0]["values"]["test"], 2), -84.84)
def test_timestamp(self):
create_query = "CREATE TABLE test (test TIMESTAMP);"
insert_query = "INSERT INTO test VALUES('1984-12-03 12:33:07')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.datetime(1984, 12, 3, 12, 33, 7))
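    # Fractional-second precision (TIMESTAMP(0)..TIMESTAMP(6)) arrived in
    # MySQL 5.6, hence the version guard in the next test.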
def test_timestamp_mysql56(self):
if not self.isMySQL56AndMore():
self.skipTest("Not supported in this version of MySQL")
self.set_sql_mode()
create_query = '''CREATE TABLE test (test0 TIMESTAMP(0),
test1 TIMESTAMP(1),
test2 TIMESTAMP(2),
test3 TIMESTAMP(3),
test4 TIMESTAMP(4),
test5 TIMESTAMP(5),
test6 TIMESTAMP(6));'''
insert_query = '''INSERT INTO test VALUES('1984-12-03 12:33:07',
'1984-12-03 12:33:07.1',
'1984-12-03 12:33:07.12',
'1984-12-03 12:33:07.123',
'1984-12-03 12:33:07.1234',
'1984-12-03 12:33:07.12345',
'1984-12-03 12:33:07.123456')'''
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test0"], datetime.datetime(1984, 12, 3, 12, 33, 7))
self.assertEqual(event.rows[0]["values"]["test1"], datetime.datetime(1984, 12, 3, 12, 33, 7, 100000))
self.assertEqual(event.rows[0]["values"]["test2"], datetime.datetime(1984, 12, 3, 12, 33, 7, 120000))
self.assertEqual(event.rows[0]["values"]["test3"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123000))
self.assertEqual(event.rows[0]["values"]["test4"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123400))
self.assertEqual(event.rows[0]["values"]["test5"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123450))
self.assertEqual(event.rows[0]["values"]["test6"], datetime.datetime(1984, 12, 3, 12, 33, 7, 123456))
def test_longlong(self):
create_query = "CREATE TABLE test (id BIGINT UNSIGNED NOT NULL, test BIGINT)"
insert_query = "INSERT INTO test VALUES(18446744073709551615, -9223372036854775808)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 18446744073709551615)
self.assertEqual(event.rows[0]["values"]["test"], -9223372036854775808)
def test_int24(self):
create_query = "CREATE TABLE test (id MEDIUMINT UNSIGNED NOT NULL, test MEDIUMINT, test2 MEDIUMINT, test3 MEDIUMINT, test4 MEDIUMINT, test5 MEDIUMINT)"
insert_query = "INSERT INTO test VALUES(16777215, 8388607, -8388608, 8, -8, 0)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["id"], 16777215)
self.assertEqual(event.rows[0]["values"]["test"], 8388607)
self.assertEqual(event.rows[0]["values"]["test2"], -8388608)
self.assertEqual(event.rows[0]["values"]["test3"], 8)
self.assertEqual(event.rows[0]["values"]["test4"], -8)
self.assertEqual(event.rows[0]["values"]["test5"], 0)
def test_date(self):
create_query = "CREATE TABLE test (test DATE);"
insert_query = "INSERT INTO test VALUES('1984-12-03')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.date(1984, 12, 3))
def test_zero_date(self):
create_query = "CREATE TABLE test (id INTEGER, test DATE, test2 DATE);"
insert_query = "INSERT INTO test (id, test2) VALUES(1, '0000-01-21')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], None)
def test_zero_month(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (id INTEGER, test DATE, test2 DATE);"
insert_query = "INSERT INTO test (id, test2) VALUES(1, '2015-00-21')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], None)
def test_zero_day(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (id INTEGER, test DATE, test2 DATE);"
insert_query = "INSERT INTO test (id, test2) VALUES(1, '2015-05-00')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], None)
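    # MySQL TIME values are durations clamped to '-838:59:59'..'838:59:59';
    # they decode to datetime.timedelta rather than datetime.time.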
def test_time(self):
create_query = "CREATE TABLE test (test1 TIME, test2 TIME);"
insert_query = "INSERT INTO test VALUES('838:59:59', '-838:59:59')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test1"], datetime.timedelta(
microseconds=(((838*60) + 59)*60 + 59)*1000000
))
        self.assertEqual(event.rows[0]["values"]["test2"], datetime.timedelta(
            # the '-' applies to the whole duration, not only the hours term
            microseconds=-(((838*60) + 59)*60 + 59)*1000000
        ))
def test_time2(self):
if not self.isMySQL56AndMore():
self.skipTest("Not supported in this version of MySQL")
create_query = "CREATE TABLE test (test1 TIME(6), test2 TIME(6));"
insert_query = """
INSERT INTO test VALUES('838:59:59.000000', '-838:59:59.000000');
"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test1"], datetime.timedelta(
microseconds=(((838*60) + 59)*60 + 59)*1000000 + 0
))
        self.assertEqual(event.rows[0]["values"]["test2"], datetime.timedelta(
            microseconds=-((((838*60) + 59)*60 + 59)*1000000 + 0)
        ))
def test_zero_time(self):
create_query = "CREATE TABLE test (id INTEGER, test TIME NOT NULL DEFAULT 0);"
insert_query = "INSERT INTO test (id) VALUES(1)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.timedelta(seconds=0))
def test_datetime(self):
create_query = "CREATE TABLE test (test DATETIME);"
insert_query = "INSERT INTO test VALUES('1984-12-03 12:33:07')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], datetime.datetime(1984, 12, 3, 12, 33, 7))
def test_zero_datetime(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (id INTEGER, test DATETIME NOT NULL DEFAULT 0);"
insert_query = "INSERT INTO test (id) VALUES(1)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
def test_broken_datetime(self):
self.set_sql_mode()
create_query = "CREATE TABLE test (test DATETIME NOT NULL);"
insert_query = "INSERT INTO test VALUES('2013-00-00 00:00:00')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
def test_year(self):
if self.isMySQL57():
self.skipTest("YEAR(2) is unsupported in mysql 5.7")
create_query = "CREATE TABLE test (a YEAR(4), b YEAR(2))"
insert_query = "INSERT INTO test VALUES(1984, 1984)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["a"], 1984)
self.assertEqual(event.rows[0]["values"]["b"], 1984)
def test_varchar(self):
create_query = "CREATE TABLE test (test VARCHAR(242)) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], 'Hello')
self.assertEqual(event.columns[0].max_length, 242)
def test_bit(self):
create_query = "CREATE TABLE test (test BIT(6), \
test2 BIT(16), \
test3 BIT(12), \
test4 BIT(9), \
test5 BIT(64) \
);"
insert_query = "INSERT INTO test VALUES( \
b'100010', \
b'1000101010111000', \
b'100010101101', \
b'101100111', \
b'1101011010110100100111100011010100010100101110111011101011011010')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.columns[0].bits, 6)
self.assertEqual(event.columns[1].bits, 16)
self.assertEqual(event.columns[2].bits, 12)
self.assertEqual(event.columns[3].bits, 9)
self.assertEqual(event.columns[4].bits, 64)
self.assertEqual(event.rows[0]["values"]["test"], "100010")
self.assertEqual(event.rows[0]["values"]["test2"], "1000101010111000")
self.assertEqual(event.rows[0]["values"]["test3"], "100010101101")
self.assertEqual(event.rows[0]["values"]["test4"], "101100111")
self.assertEqual(event.rows[0]["values"]["test5"], "1101011010110100100111100011010100010100101110111011101011011010")
def test_enum(self):
create_query = "CREATE TABLE test (test ENUM('a', 'ba', 'c'), test2 ENUM('a', 'ba', 'c')) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('ba', 'a')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], 'ba')
self.assertEqual(event.rows[0]["values"]["test2"], 'a')
def test_enum_empty_string(self):
create_query = "CREATE TABLE test (test ENUM('a', 'ba', 'c'), test2 ENUM('a', 'ba', 'c')) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('ba', 'asdf')"
last_sql_mode = self.execute("SELECT @@SESSION.sql_mode;"). \
fetchall()[0][0]
self.execute("SET SESSION sql_mode = 'ANSI';")
event = self.create_and_insert_value(create_query, insert_query)
self.execute("SET SESSION sql_mode = '%s';" % last_sql_mode)
self.assertEqual(event.rows[0]["values"]["test"], 'ba')
self.assertEqual(event.rows[0]["values"]["test2"], '')
def test_set(self):
create_query = "CREATE TABLE test (test SET('a', 'ba', 'c'), test2 SET('a', 'ba', 'c')) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('ba,a,c', 'a,c')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], set(('a', 'ba', 'c')))
self.assertEqual(event.rows[0]["values"]["test2"], set(('a', 'c')))
def test_tiny_blob(self):
create_query = "CREATE TABLE test (test TINYBLOB, test2 TINYTEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_medium_blob(self):
create_query = "CREATE TABLE test (test MEDIUMBLOB, test2 MEDIUMTEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_long_blob(self):
create_query = "CREATE TABLE test (test LONGBLOB, test2 LONGTEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_blob(self):
create_query = "CREATE TABLE test (test BLOB, test2 TEXT) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello', 'World')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'Hello')
self.assertEqual(event.rows[0]["values"]["test2"], 'World')
def test_string(self):
create_query = "CREATE TABLE test (test CHAR(12)) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = "INSERT INTO test VALUES('Hello')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], 'Hello')
def test_geometry(self):
create_query = "CREATE TABLE test (test GEOMETRY);"
insert_query = "INSERT INTO test VALUES(GeomFromText('POINT(1 1)'))"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], b'\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\xf0?')
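    # MySQL 5.7 stores JSON in a packed binary format; this reader decodes
    # object keys and string values as bytes, which is why the expected
    # values below use b'...' (see to_binary_dict).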
def test_json(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '{"my_key": "my_val", "my_key2": "my_val2"}');"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], {b"my_key": b"my_val", b"my_key2": b"my_val2"})
def test_json_array(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '["my_val", "my_val2"]');"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], [b'my_val', b'my_val2'])
def test_json_large(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
data = dict([('foooo%i'%i, 'baaaaar%i'%i) for i in range(2560)])
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '%s');""" % json.dumps(data)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], to_binary_dict(data))
def test_json_types(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
types = [
True,
False,
None,
1.2,
            2**14,  # '**' (power) -- '^' would be bitwise XOR in Python
            2**30,
            2**62,
            -1 * 2**14,
            -1 * 2**30,
            -1 * 2**62,
['foo', 'bar']
]
for t in types:
data = {'foo': t}
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '%s');""" % json.dumps(data)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], to_binary_dict(data))
self.tearDown()
self.setUp()
def test_json_basic(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
types = [
True,
False,
None,
1.2,
            2**14,  # '**' (power) -- '^' would be bitwise XOR in Python
            2**30,
            2**62,
            -1 * 2**14,
            -1 * 2**30,
            -1 * 2**62,
]
for data in types:
create_query = "CREATE TABLE test (id int, value json);"
insert_query = """INSERT INTO test (id, value) VALUES (1, '%s');""" % json.dumps(data)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], data)
self.tearDown()
self.setUp()
def test_json_unicode(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
insert_query = u"""INSERT INTO test (id, value) VALUES (1, '{"miam": "🍔"}');"""
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"][b"miam"], u'🍔'.encode('utf8'))
def test_json_long_string(self):
if not self.isMySQL57():
self.skipTest("Json is only supported in mysql 5.7")
create_query = "CREATE TABLE test (id int, value json);"
string_value = "super_long_string" * 100
insert_query = "INSERT INTO test (id, value) VALUES (1, '{\"my_key\": \"%s\"}');" % (string_value,)
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["value"], to_binary_dict({"my_key": string_value}))
def test_null(self):
create_query = "CREATE TABLE test ( \
test TINYINT NULL DEFAULT NULL, \
test2 TINYINT NULL DEFAULT NULL, \
test3 TINYINT NULL DEFAULT NULL, \
test4 TINYINT NULL DEFAULT NULL, \
test5 TINYINT NULL DEFAULT NULL, \
test6 TINYINT NULL DEFAULT NULL, \
test7 TINYINT NULL DEFAULT NULL, \
test8 TINYINT NULL DEFAULT NULL, \
test9 TINYINT NULL DEFAULT NULL, \
test10 TINYINT NULL DEFAULT NULL, \
test11 TINYINT NULL DEFAULT NULL, \
test12 TINYINT NULL DEFAULT NULL, \
test13 TINYINT NULL DEFAULT NULL, \
test14 TINYINT NULL DEFAULT NULL, \
test15 TINYINT NULL DEFAULT NULL, \
test16 TINYINT NULL DEFAULT NULL, \
test17 TINYINT NULL DEFAULT NULL, \
test18 TINYINT NULL DEFAULT NULL, \
test19 TINYINT NULL DEFAULT NULL, \
test20 TINYINT NULL DEFAULT NULL\
)"
insert_query = "INSERT INTO test (test, test2, test3, test7, test20) VALUES(NULL, -128, NULL, 42, 84)"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], None)
self.assertEqual(event.rows[0]["values"]["test2"], -128)
self.assertEqual(event.rows[0]["values"]["test3"], None)
self.assertEqual(event.rows[0]["values"]["test7"], 42)
self.assertEqual(event.rows[0]["values"]["test20"], 84)
def test_encoding_latin1(self):
db = copy.copy(self.database)
db["charset"] = "latin1"
self.connect_conn_control(db)
if platform.python_version_tuple()[0] == "2":
string = unichr(233)
else:
string = "\u00e9"
create_query = "CREATE TABLE test (test CHAR(12)) CHARACTER SET latin1 COLLATE latin1_bin;"
insert_query = b"INSERT INTO test VALUES('" + string.encode('latin-1') + b"');"
event = self.create_and_insert_value(create_query, insert_query)
self.assertEqual(event.rows[0]["values"]["test"], string)
def test_encoding_utf8(self):
if platform.python_version_tuple()[0] == "2":
string = unichr(0x20ac)
else:
string = "\u20ac"
create_query = "CREATE TABLE test (test CHAR(12)) CHARACTER SET utf8 COLLATE utf8_bin;"
insert_query = b"INSERT INTO test VALUES('" + string.encode('utf-8') + b"')"
event = self.create_and_insert_value(create_query, insert_query)
self.assertMultiLineEqual(event.rows[0]["values"]["test"], string)
if __name__ == "__main__":
unittest.main()
| true | true |
f7fdd52c99651adf569d5833e47ab1c4d6a723af | 5,116 | py | Python | contrib/linearize/linearize-hashes.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | Darrenshome40/shitecoin | a2535c8fc5a43ee21ec818d5367439f6302cd084 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2019 The shitecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
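# e.g. hex_switchEndian('1a2b3c') -> '3c2b1a'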
class shitecoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
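# Block hashes are fetched in JSON-RPC batches (up to max_blocks_per_call per
# request) to limit round trips; replies are assumed to arrive in order.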
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = shitecoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
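# A minimal CONFIG-FILE sketch (key=value lines; '#' lines are skipped; the
# credential values here are purely illustrative):
#   host=127.0.0.1
#   port=8332
#   rpcuser=alice
#   rpcpassword=secret
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false
# Alternatively set datadir=<path> and the credentials are read from .cookie.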
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
| 33.437908 | 108 | 0.607506 |
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
class shitecoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = shitecoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x)
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
m = re.search(r'^\s*#', line)
if m:
continue
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
| true | true |
f7fdd5bc80e595be22faeed0580a4c27329ccb0c | 11,841 | py | Python | tests/models/dsheetpiling/test_profiles.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 4 | 2021-10-29T21:30:47.000Z | 2022-03-18T13:15:17.000Z | tests/models/dsheetpiling/test_profiles.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 3 | 2021-11-05T07:56:16.000Z | 2022-03-27T13:27:05.000Z | tests/models/dsheetpiling/test_profiles.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 4 | 2021-10-29T21:30:51.000Z | 2022-01-17T13:20:40.000Z | from contextlib import nullcontext as does_not_raise
from typing import Callable, List
import pytest
from pydantic import ValidationError
from geolib.geometry.one import Point
from geolib.models.dsheetpiling.dsheetpiling_model import DSheetPilingModel
from geolib.models.dsheetpiling.internal import _DEFAULT_SOIL_PROFILE_NAME
from geolib.models.dsheetpiling.internal import SoilProfile as InternalProfile
from geolib.models.dsheetpiling.internal import SoilProfiles
from geolib.models.dsheetpiling.profiles import SoilLayer, SoilProfile
from geolib.models.dsheetpiling.settings import (
CurveSettings,
LateralEarthPressureMethodStage,
ModulusReactionType,
PassiveSide,
Side,
)
from geolib.soils import Soil
_SOIL_TEST_NAME_1: str = "Clay"
_SOIL_TEST_NAME_2: str = "Sand"
_PROFILE_TEST_NAME: str = "test profiel"
_PROFILE_TEST_COORDINATES: Point = Point(x=100, y=250)
_TEST_LAYERS: List[SoilLayer] = [
SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1),
SoilLayer(top_of_layer=-2, soil=_SOIL_TEST_NAME_2),
]
@pytest.fixture
def _model() -> DSheetPilingModel:
model = DSheetPilingModel()
model.add_stage(
name="Initial stage",
passive_side=PassiveSide.DSHEETPILING_DETERMINED,
method_left=LateralEarthPressureMethodStage.KA_KO_KP,
method_right=LateralEarthPressureMethodStage.KA_KO_KP,
)
model.add_soil(Soil(name=_SOIL_TEST_NAME_1))
model.add_soil(Soil(name=_SOIL_TEST_NAME_2))
return model
@pytest.fixture
def make_profile() -> Callable[[str], SoilProfile]:
def _make_profile(name: str):
return SoilProfile(
name=name, layers=_TEST_LAYERS, coordinate=_PROFILE_TEST_COORDINATES
)
return _make_profile
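# make_profile is a factory fixture: returning an inner function lets each test
# choose its own profile name while reusing the shared layers and coordinate.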
class TestCurveSettings:
@pytest.mark.unittest
@pytest.mark.parametrize(
"modulus_reaction_type",
[
pytest.param(ModulusReactionType.TANGENT),
pytest.param(ModulusReactionType.SECANT),
],
)
@pytest.mark.parametrize(
"use_unloading_reloading_curve",
[
pytest.param(True),
pytest.param(False),
],
)
@pytest.mark.parametrize(
"curve_number,raising_context",
[
pytest.param(
0,
pytest.raises(
ValidationError,
match=r"ensure this value is greater than or equal to 1",
),
id="Lower than allowed",
),
pytest.param(1, does_not_raise()),
pytest.param(2, does_not_raise()),
pytest.param(3, does_not_raise()),
pytest.param(4, does_not_raise()),
pytest.param(
5,
pytest.raises(
ValidationError, match=r"ensure this value is less than or equal to 4"
),
id="Higher than allowed",
),
],
)
def test_curve_settings(
self,
modulus_reaction_type: ModulusReactionType,
use_unloading_reloading_curve: bool,
curve_number: int,
raising_context,
):
with raising_context:
CurveSettings(
modulus_reaction_type=modulus_reaction_type,
use_unloading_reloading_curve=use_unloading_reloading_curve,
curve_number=curve_number,
)
@pytest.mark.integrationtest
@pytest.mark.parametrize(
"modulus_reaction_type",
[
pytest.param(ModulusReactionType.TANGENT),
pytest.param(ModulusReactionType.SECANT),
],
)
@pytest.mark.parametrize(
"use_unloading_reloading_curve",
[
pytest.param(True),
pytest.param(False),
],
)
@pytest.mark.parametrize(
"curve_number",
[pytest.param(1), pytest.param(2), pytest.param(3), pytest.param(4)],
)
def test_dsheetpilingmodel_add_curve_settings_internal_soil_profiles_updated(
self,
_model: DSheetPilingModel,
modulus_reaction_type: ModulusReactionType,
use_unloading_reloading_curve: bool,
curve_number: int,
):
"""Validate if curve settings are refered in [SOIL PROFILES]"""
curve_settings = CurveSettings(
modulus_reaction_type=modulus_reaction_type,
use_unloading_reloading_curve=use_unloading_reloading_curve,
curve_number=curve_number,
)
_model.set_curve_settings(curve_settings=curve_settings)
assert isinstance(_model.datastructure.input_data.soil_profiles, SoilProfiles)
assert (
_model.datastructure.input_data.soil_profiles.modulus_reaction_type
== modulus_reaction_type.value
)
assert (
_model.datastructure.input_data.soil_profiles.use_unloading_reloading_curve
== use_unloading_reloading_curve
)
assert _model.datastructure.input_data.soil_profiles.curve_number == curve_number
class TestSoilProfile:
@pytest.mark.unittest
@pytest.mark.parametrize(
"layers,raise_context",
[
pytest.param(
[SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1)],
does_not_raise(),
id="Single layer",
),
pytest.param(
[
SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1),
SoilLayer(top_of_layer=-2, soil=_SOIL_TEST_NAME_2),
],
does_not_raise(),
id="Multiple layers",
),
pytest.param(
[],
pytest.raises(
ValidationError, match=r"ensure this value has at least 1 items"
),
id="No layers",
),
pytest.param(
[
SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1),
SoilLayer(top_of_layer=2, soil=_SOIL_TEST_NAME_2),
],
pytest.raises(
ValidationError,
match=r"Top of layer must decrease with each layer along depth. Top of layers: ",
),
id="Multiple layers, top of layer not decreasing",
),
],
)
def test_profile_initialization_with_different_points_arguments(
self, layers: List[SoilLayer], raise_context
):
with raise_context:
profile = SoilProfile(name=_PROFILE_TEST_NAME, layers=layers)
try:
internal = profile.to_internal()
except UnboundLocalError:
return
assert profile.name == internal.name
assert profile.coordinate.x == internal.coordinate.x
assert profile.coordinate.y == internal.coordinate.y
assert profile.coordinate.z == internal.coordinate.z
assert len(profile.layers) == len(internal.layers)
for layer, internal_layer in zip(profile.layers, internal.layers):
assert layer.top_of_layer == internal_layer.top_of_layer
assert layer.soil == internal_layer.soil
assert layer.water_pressure_top == internal_layer.water_pressure_top
assert layer.water_pressure_bottom == internal_layer.water_pressure_bottom
@pytest.mark.integrationtest
def test_dsheetpilingmodel_add_profile_soil_unknown_raises_ValueError(
self, _model: DSheetPilingModel
):
current_stage = _model.current_stage
name_not_in_soil_collection = "not in soil collection"
layers = [SoilLayer(top_of_layer=0, soil=name_not_in_soil_collection)]
profile = SoilProfile(name=_PROFILE_TEST_NAME, layers=layers)
assert (
name_not_in_soil_collection
not in _model.datastructure.input_data.soil_collection.soil_names
)
with pytest.raises(ValueError):
_model.add_profile(profile=profile, side=Side.BOTH, stage_id=current_stage)
@pytest.mark.integrationtest
def test_dsheetpilingmodel_add_profile_invalid_stage_id_raises_ValueError(
self, _model: DSheetPilingModel, make_profile: Callable
):
profile = make_profile(name=_PROFILE_TEST_NAME)
invalid_stage_id = (
len(_model.datastructure.input_data.construction_stages.stages) + 1
)
with pytest.raises(
ValueError, match=r"Stage \d+ is not added to the internal datastructure"
):
_model.add_profile(profile=profile, side=Side.BOTH, stage_id=invalid_stage_id)
@pytest.mark.integrationtest
@pytest.mark.parametrize(
"side",
[
pytest.param(Side.LEFT, id="Left side"),
pytest.param(Side.RIGHT, id="Right side"),
pytest.param(Side.BOTH, id="Both sides"),
],
)
def test_dsheetpilingmodel_add_profile_internal_soil_profiles_updated(
self, _model: DSheetPilingModel, make_profile: Callable, side: Side
):
"""Validate if profile is refered in [SOIL PROFILES]"""
profile_name = _PROFILE_TEST_NAME
current_stage = _model.current_stage
assert profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(profile_name)
_model.add_profile(profile=profile, side=side, stage_id=current_stage)
assert isinstance(_model.datastructure.input_data.soil_profiles, SoilProfiles)
assert len(_model.datastructure.input_data.soil_profiles.soil_profiles) == 1
internal = _model.datastructure.input_data.soil_profiles.soil_profiles[
current_stage
]
assert isinstance(internal, InternalProfile)
assert internal.name == _PROFILE_TEST_NAME
assert len(profile.layers) == len(internal.layers)
for layer, internal_layer in zip(profile.layers, internal.layers):
assert layer.dict() == internal_layer.dict()
assert profile.coordinate.x == internal.coordinate.x
assert profile.coordinate.y == internal.coordinate.y
@pytest.mark.integrationtest
@pytest.mark.parametrize(
"side, left_profile_name, right_profile_name",
[
pytest.param(
Side.LEFT, "Left profile", _DEFAULT_SOIL_PROFILE_NAME, id="Left side"
),
pytest.param(
Side.RIGHT, _DEFAULT_SOIL_PROFILE_NAME, "Right profile", id="Right side"
),
pytest.param(Side.BOTH, "Same profile", "Same profile", id="Both sides"),
],
)
def test_dsheetpilingmodel_add_profile_internal_construction_stages(
self,
_model: DSheetPilingModel,
make_profile: Callable,
side: Side,
left_profile_name: str,
right_profile_name: str,
):
"""Validate if profile is refered in [CONSTRUCTION STAGES]"""
current_stage = _model.current_stage
if side == Side.LEFT:
assert left_profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(left_profile_name)
elif side == Side.RIGHT:
assert right_profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(right_profile_name)
else:
assert left_profile_name == right_profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(right_profile_name)
_model.add_profile(profile=profile, side=side, stage_id=current_stage)
assert (
_model.datastructure.input_data.construction_stages.stages[
current_stage
].soil_profile_left
== left_profile_name
)
assert (
_model.datastructure.input_data.construction_stages.stages[
current_stage
].soil_profile_right
== right_profile_name
)
| 35.990881 | 101 | 0.638966 | from contextlib import nullcontext as does_not_raise
from typing import Callable, List
import pytest
from pydantic import ValidationError
from geolib.geometry.one import Point
from geolib.models.dsheetpiling.dsheetpiling_model import DSheetPilingModel
from geolib.models.dsheetpiling.internal import _DEFAULT_SOIL_PROFILE_NAME
from geolib.models.dsheetpiling.internal import SoilProfile as InternalProfile
from geolib.models.dsheetpiling.internal import SoilProfiles
from geolib.models.dsheetpiling.profiles import SoilLayer, SoilProfile
from geolib.models.dsheetpiling.settings import (
CurveSettings,
LateralEarthPressureMethodStage,
ModulusReactionType,
PassiveSide,
Side,
)
from geolib.soils import Soil
_SOIL_TEST_NAME_1: str = "Clay"
_SOIL_TEST_NAME_2: str = "Sand"
_PROFILE_TEST_NAME: str = "test profiel"
_PROFILE_TEST_COORDINATES: Point = Point(x=100, y=250)
_TEST_LAYERS: List[SoilLayer] = [
SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1),
SoilLayer(top_of_layer=-2, soil=_SOIL_TEST_NAME_2),
]
@pytest.fixture
def _model() -> DSheetPilingModel:
model = DSheetPilingModel()
model.add_stage(
name="Initial stage",
passive_side=PassiveSide.DSHEETPILING_DETERMINED,
method_left=LateralEarthPressureMethodStage.KA_KO_KP,
method_right=LateralEarthPressureMethodStage.KA_KO_KP,
)
model.add_soil(Soil(name=_SOIL_TEST_NAME_1))
model.add_soil(Soil(name=_SOIL_TEST_NAME_2))
return model
@pytest.fixture
def make_profile() -> SoilProfile:
def _make_profile(name: str):
return SoilProfile(
name=name, layers=_TEST_LAYERS, coordinate=_PROFILE_TEST_COORDINATES
)
return _make_profile
class TestCurveSettings:
@pytest.mark.unittest
@pytest.mark.parametrize(
"modulus_reaction_type",
[
pytest.param(ModulusReactionType.TANGENT),
pytest.param(ModulusReactionType.SECANT),
],
)
@pytest.mark.parametrize(
"use_unloading_reloading_curve",
[
pytest.param(True),
pytest.param(False),
],
)
@pytest.mark.parametrize(
"curve_number,raising_context",
[
pytest.param(
0,
pytest.raises(
ValidationError,
match=r"ensure this value is greater than or equal to 1",
),
id="Lower than allowed",
),
pytest.param(1, does_not_raise()),
pytest.param(2, does_not_raise()),
pytest.param(3, does_not_raise()),
pytest.param(4, does_not_raise()),
pytest.param(
5,
pytest.raises(
ValidationError, match=r"ensure this value is less than or equal to 4"
),
id="Higher than allowed",
),
],
)
def test_curve_settings(
self,
modulus_reaction_type: ModulusReactionType,
use_unloading_reloading_curve: bool,
curve_number: int,
raising_context,
):
with raising_context:
CurveSettings(
modulus_reaction_type=modulus_reaction_type,
use_unloading_reloading_curve=use_unloading_reloading_curve,
curve_number=curve_number,
)
@pytest.mark.integrationtest
@pytest.mark.parametrize(
"modulus_reaction_type",
[
pytest.param(ModulusReactionType.TANGENT),
pytest.param(ModulusReactionType.SECANT),
],
)
@pytest.mark.parametrize(
"use_unloading_reloading_curve",
[
pytest.param(True),
pytest.param(False),
],
)
@pytest.mark.parametrize(
"curve_number",
[pytest.param(1), pytest.param(2), pytest.param(3), pytest.param(4)],
)
def test_dsheetpilingmodel_add_curve_settings_internal_soil_profiles_updated(
self,
_model: DSheetPilingModel,
modulus_reaction_type: ModulusReactionType,
use_unloading_reloading_curve: bool,
curve_number: int,
):
curve_settings = CurveSettings(
modulus_reaction_type=modulus_reaction_type,
use_unloading_reloading_curve=use_unloading_reloading_curve,
curve_number=curve_number,
)
_model.set_curve_settings(curve_settings=curve_settings)
assert isinstance(_model.datastructure.input_data.soil_profiles, SoilProfiles)
assert (
_model.datastructure.input_data.soil_profiles.modulus_reaction_type
== modulus_reaction_type.value
)
assert (
_model.datastructure.input_data.soil_profiles.use_unloading_reloading_curve
== use_unloading_reloading_curve
)
assert _model.datastructure.input_data.soil_profiles.curve_number == curve_number
class TestSoilProfile:
@pytest.mark.unittest
@pytest.mark.parametrize(
"layers,raise_context",
[
pytest.param(
[SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1)],
does_not_raise(),
id="Single layer",
),
pytest.param(
[
SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1),
SoilLayer(top_of_layer=-2, soil=_SOIL_TEST_NAME_2),
],
does_not_raise(),
id="Multiple layers",
),
pytest.param(
[],
pytest.raises(
ValidationError, match=r"ensure this value has at least 1 items"
),
id="No layers",
),
pytest.param(
[
SoilLayer(top_of_layer=0, soil=_SOIL_TEST_NAME_1),
SoilLayer(top_of_layer=2, soil=_SOIL_TEST_NAME_2),
],
pytest.raises(
ValidationError,
match=r"Top of layer must decrease with each layer along depth. Top of layers: ",
),
id="Multiple layers, top of layer not decreasing",
),
],
)
def test_profile_initialization_with_different_points_arguments(
self, layers: List[SoilLayer], raise_context
):
with raise_context:
profile = SoilProfile(name=_PROFILE_TEST_NAME, layers=layers)
try:
internal = profile.to_internal()
except UnboundLocalError:
return
assert profile.name == internal.name
assert profile.coordinate.x == internal.coordinate.x
assert profile.coordinate.y == internal.coordinate.y
assert profile.coordinate.z == internal.coordinate.z
assert len(profile.layers) == len(internal.layers)
for layer, internal_layer in zip(profile.layers, internal.layers):
assert layer.top_of_layer == internal_layer.top_of_layer
assert layer.soil == internal_layer.soil
assert layer.water_pressure_top == internal_layer.water_pressure_top
assert layer.water_pressure_bottom == internal_layer.water_pressure_bottom
@pytest.mark.integrationtest
def test_dsheetpilingmodel_add_profile_soil_unknown_raises_ValueError(
self, _model: DSheetPilingModel
):
current_stage = _model.current_stage
name_not_in_soil_collection = "not in soil collection"
layers = [SoilLayer(top_of_layer=0, soil=name_not_in_soil_collection)]
profile = SoilProfile(name=_PROFILE_TEST_NAME, layers=layers)
assert (
name_not_in_soil_collection
not in _model.datastructure.input_data.soil_collection.soil_names
)
with pytest.raises(ValueError):
_model.add_profile(profile=profile, side=Side.BOTH, stage_id=current_stage)
@pytest.mark.integrationtest
def test_dsheetpilingmodel_add_profile_invalid_stage_id_raises_ValueError(
self, _model: DSheetPilingModel, make_profile: Callable
):
profile = make_profile(name=_PROFILE_TEST_NAME)
invalid_stage_id = (
len(_model.datastructure.input_data.construction_stages.stages) + 1
)
with pytest.raises(
ValueError, match=r"Stage \d+ is not added to the internal datastructure"
):
_model.add_profile(profile=profile, side=Side.BOTH, stage_id=invalid_stage_id)
@pytest.mark.integrationtest
@pytest.mark.parametrize(
"side",
[
pytest.param(Side.LEFT, id="Left side"),
pytest.param(Side.RIGHT, id="Right side"),
pytest.param(Side.BOTH, id="Both sides"),
],
)
def test_dsheetpilingmodel_add_profile_internal_soil_profiles_updated(
self, _model: DSheetPilingModel, make_profile: Callable, side: Side
):
profile_name = _PROFILE_TEST_NAME
current_stage = _model.current_stage
assert profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(profile_name)
_model.add_profile(profile=profile, side=side, stage_id=current_stage)
assert isinstance(_model.datastructure.input_data.soil_profiles, SoilProfiles)
assert len(_model.datastructure.input_data.soil_profiles.soil_profiles) == 1
internal = _model.datastructure.input_data.soil_profiles.soil_profiles[
current_stage
]
assert isinstance(internal, InternalProfile)
assert internal.name == _PROFILE_TEST_NAME
assert len(profile.layers) == len(internal.layers)
for layer, internal_layer in zip(profile.layers, internal.layers):
assert layer.dict() == internal_layer.dict()
assert profile.coordinate.x == internal.coordinate.x
assert profile.coordinate.y == internal.coordinate.y
@pytest.mark.integrationtest
@pytest.mark.parametrize(
"side, left_profile_name, right_profile_name",
[
pytest.param(
Side.LEFT, "Left profile", _DEFAULT_SOIL_PROFILE_NAME, id="Left side"
),
pytest.param(
Side.RIGHT, _DEFAULT_SOIL_PROFILE_NAME, "Right profile", id="Right side"
),
pytest.param(Side.BOTH, "Same profile", "Same profile", id="Both sides"),
],
)
def test_dsheetpilingmodel_add_profile_internal_construction_stages(
self,
_model: DSheetPilingModel,
make_profile: Callable,
side: Side,
left_profile_name: str,
right_profile_name: str,
):
current_stage = _model.current_stage
if side == Side.LEFT:
assert left_profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(left_profile_name)
elif side == Side.RIGHT:
assert right_profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(right_profile_name)
else:
assert left_profile_name == right_profile_name != _DEFAULT_SOIL_PROFILE_NAME
profile = make_profile(right_profile_name)
_model.add_profile(profile=profile, side=side, stage_id=current_stage)
assert (
_model.datastructure.input_data.construction_stages.stages[
current_stage
].soil_profile_left
== left_profile_name
)
assert (
_model.datastructure.input_data.construction_stages.stages[
current_stage
].soil_profile_right
== right_profile_name
)
| true | true |
f7fdd607971950a60b86a93c23218877e186c5cc | 15,243 | py | Python | onnxruntime/core/providers/nuphar/scripts/model_quantizer.py | toothache/onnxruntime | 217b2c9f931b5b0f704df0c8336def47025d2148 | [
"MIT"
] | 1 | 2021-09-15T08:20:39.000Z | 2021-09-15T08:20:39.000Z | onnxruntime/core/providers/nuphar/scripts/model_quantizer.py | toothache/onnxruntime | 217b2c9f931b5b0f704df0c8336def47025d2148 | [
"MIT"
] | 1 | 2021-03-01T23:16:36.000Z | 2021-03-01T23:16:36.000Z | onnxruntime/core/providers/nuphar/scripts/model_quantizer.py | toothache/onnxruntime | 217b2c9f931b5b0f704df0c8336def47025d2148 | [
"MIT"
] | 3 | 2019-01-08T12:19:04.000Z | 2020-05-09T21:33:12.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import argparse
from enum import Enum
import json
import numpy as np
import onnx
from onnx import helper, numpy_helper
from .node_factory import NodeFactory, ensure_opset
class QuantizeConfig:
def __init__(self, signed, reserved_bits, type_bits):
self.sign_bit_ = 1 if signed else 0
self.reserved_bits_ = reserved_bits
self.type_bits_ = type_bits
@staticmethod
def from_dict(qcfg_dict):
return QuantizeConfig(1 if qcfg_dict['QuantizationType'] == 'Signed' else 0,
qcfg_dict['ReservedBit'],
qcfg_dict['QuantizeBit'])
def signed(self):
return self.sign_bit_ == 1
def usable_bits(self):
return self.type_bits_ - self.reserved_bits_
def q_max(self):
return float((1 << (self.usable_bits() - self.sign_bit_)) - 1)
def q_min(self):
return float(-(self.q_max() + 1) if self.signed() else 0)
def q_range(self):
return self.q_max() + 0.5 if self.signed() else float(1 << self.usable_bits())
def q_type(self):
if self.type_bits_ == 8:
return np.int8 if self.sign_bit_ else np.uint8
else:
assert self.type_bits_ == 16
return np.int16 if self.sign_bit_ else np.uint16
def q_type_bits(self):
return self.type_bits_
def __iter__(self): # need this to make dict for json
return iter([('QuantizeBit', self.type_bits_),
('QuantizationType', 'Signed' if self.sign_bit_ else 'Unsigned'),
('ReservedBit', self.reserved_bits_)])
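# Worked example (values follow from the formulas above):
# QuantizeConfig(signed=0, reserved_bits=1, type_bits=8) -- the asymm8 preset's
# X config used below -- yields usable_bits()=7, q_min()=0.0, q_max()=127.0,
# q_range()=128.0 and q_type()=np.uint8.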
def quantize_matmul_2d_with_weight(in_node, in_graph, nf, converted_weights, quantized_inputs, qcfg_dict, update_qcfg_dict, default_qcfg, onnx_opset_ver):
assert in_node.op_type == 'MatMul'
# quantize weight
# only handles weight being inputs[1] of MatMul/Gemm node
fparam_name = in_node.input[1]
# skip if weights shared by other nodes that's not MatMul
# TODO: support GEMM op if needed
other_nodes = [n for n in in_graph.node if n != in_node and fparam_name in n.input and n.op_type != 'MatMul']
if other_nodes:
return False
if in_node.output[0] in qcfg_dict:
node_qcfg = qcfg_dict[in_node.output[0]]
else:
node_qcfg = None
if not node_qcfg:
if not update_qcfg_dict and qcfg_dict:
# when qcfg_dict is readonly, raise warning if qcfg is not found for this node
print("Warning: qcfg is not found for node with output: " + in_node.output[0] + ", fall back to default qcfg.")
node_qcfg = default_qcfg
w_qcfg = QuantizeConfig.from_dict(node_qcfg['W'])
x_qcfg = QuantizeConfig.from_dict(node_qcfg['X'])
symmetric = node_qcfg['Symmetric']
# for symmetric quantization, both weight and input should be quantized to signed
assert not symmetric or (w_qcfg.signed() and x_qcfg.signed())
# quantize_type should match between weight and input
assert w_qcfg.q_type_bits() == x_qcfg.q_type_bits()
if fparam_name in converted_weights:
step, base, qparam_rowsum, qparam, w_qcfg1, symmetric1 = converted_weights[fparam_name]
# for shared weights, node should use the same kind of quantization
assert dict(w_qcfg1) == dict(w_qcfg)
assert symmetric1 == symmetric
else:
fparam = nf.get_initializer(fparam_name)
if fparam is None or len(fparam.shape) != 2:
return False
q_range = w_qcfg.q_range()
if symmetric:
fscale = np.amax(np.abs(fparam), axis=0)
step = fscale / q_range
base = 0
else:
fmin = np.amin(fparam, axis=0)
fmax = np.amax(fparam, axis=0)
fscale = (fmax - fmin)/(2 if w_qcfg.signed() else 1) # signed would be normalized to [-1, 1], and unsigned to [0, 1]
step = fscale / q_range
base = (fmax + fmin + step) * 0.5 if w_qcfg.signed() else fmin
fparam_norm = np.zeros_like(fparam)
expand_fscale = np.expand_dims(fscale,0)
np.divide((fparam - np.expand_dims(base,0)), expand_fscale, out=fparam_norm, where=expand_fscale!=0)
qparam = np.round(fparam_norm * q_range)
qparam = np.clip(qparam, w_qcfg.q_min(), w_qcfg.q_max())
qparam_rowsum = np.sum(qparam, axis=0)
qparam = qparam.astype(w_qcfg.q_type())
# create new weights in main graph in case other Scans share via converted_weights
nf.make_initializer(step, fparam_name + '_step', in_main_graph=True)
nf.make_initializer(qparam, fparam_name + '_qparam', in_main_graph=True)
step = fparam_name + '_step'
qparam = fparam_name + '_qparam'
if symmetric:
# no need to compute qparam_rowsum and base for symmetric quantization
base = None
qparam_rowsum = None
else:
nf.make_initializer(base, fparam_name + '_base', in_main_graph=True)
base = fparam_name + '_base'
nf.make_initializer(qparam_rowsum, fparam_name + '_qparam_rowsum', in_main_graph=True)
qparam_rowsum = fparam_name + '_qparam_rowsum'
converted_weights[fparam_name] = (step, base, qparam_rowsum, qparam, w_qcfg, symmetric)
nf.remove_initializer(fparam_name)
# quantize input
with nf.scoped_prefix(in_node.name) as scoped_prefix:
input_dim = nf.get_initializer(qparam).shape[0]
X = in_node.input[0]
if quantized_inputs is not None:
quantized_inputs_key = '{}_{}_{}'.format(X, symmetric, '|'.join(['{}:{}'.format(k,v) for (k, v) in x_qcfg]))
if quantized_inputs is not None and quantized_inputs_key in quantized_inputs:
scale_X, bias_X, Q_X, Q_X_sum_int32 = quantized_inputs[quantized_inputs_key]
else:
if symmetric:
delta_X = nf.make_node('ReduceMax', nf.make_node('Abs', X), {'axes':[-1]}) # keepdims = 1
inv_delta_X = nf.make_node('Reciprocal', delta_X)
norm_X = nf.make_node('Mul', [X, inv_delta_X])
bias_X = None
assert x_qcfg.signed()
else:
reduce_max_X = nf.make_node('ReduceMax', X, {'axes':[-1]}) # keepdims = 1
bias_X = nf.make_node('ReduceMin', X, {'axes':[-1]})
delta_X = nf.make_node('Sub', [reduce_max_X, bias_X])
inv_delta_X = nf.make_node('Reciprocal', delta_X)
norm_X = nf.make_node('Mul', [nf.make_node('Sub', [X, bias_X]), inv_delta_X])
scale_X = nf.make_node('Mul', [delta_X, np.asarray(1.0 / x_qcfg.q_range()).astype(np.float32)])
Q_Xf = nf.make_node('Mul', [norm_X, np.asarray(x_qcfg.q_range()).astype(np.float32)])
Q_Xf = nf.make_node('Add', [Q_Xf, np.asarray(0.5).astype(np.float32)])
Q_Xf = nf.make_node('Floor', Q_Xf)
if onnx_opset_ver < 11:
Q_Xf = nf.make_node('Clip', Q_Xf, {'max':x_qcfg.q_max(), 'min':x_qcfg.q_min()})
else:
# Clip changed min max to inputs in opset 11
Q_Xf = nf.make_node('Clip', [Q_Xf, np.asarray(x_qcfg.q_min()).astype(np.float32), np.asarray(x_qcfg.q_max()).astype(np.float32)])
Q_X = nf.make_node('Cast', Q_Xf, {'to':int({np.uint8 : onnx.TensorProto.UINT8,
np.int8 : onnx.TensorProto.INT8,
np.uint16 : onnx.TensorProto.UINT16,
np.int16 : onnx.TensorProto.INT16}[x_qcfg.q_type()])})
if symmetric:
Q_X_sum_int32 = None
else:
Q_X_sum_int32 = nf.make_node_with_axes('ReduceSum', nf.make_node('Cast', Q_X, {'to':int(onnx.TensorProto.INT32)}), [-1], onnx_opset_ver)
if quantized_inputs is not None:
quantized_inputs[quantized_inputs_key] = (scale_X, bias_X, Q_X, Q_X_sum_int32)
# MatMulInteger
if x_qcfg.q_type_bits() == 8:
Q_Y = nf.make_node('MatMulInteger', [Q_X, qparam])
else:
Q_Y = nf.make_node('MatMulInteger16', [Q_X, qparam])
Q_Y.domain = "com.microsoft"
# Dequantize
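        # With X ~= scale_X*Q_X + bias_X and, per weight column, W ~= step*Q_W + base,
        # X @ W expands to scale_X*step*(Q_X@Q_W) + step*bias_X*rowsum(Q_W)
        # + base*scale_X*rowsum(Q_X) + base*bias_X*input_dim (terms o0..o3 below);
        # the symmetric case keeps only the first term.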
Y = in_node.output[0]
if symmetric:
nf.make_node('Mul',
[nf.make_node('Mul', [step, scale_X]),
nf.make_node('Cast', Q_Y, {'to': int(onnx.TensorProto.FLOAT)})],
output_names=Y)
else:
o0 = nf.make_node('Mul', [nf.make_node('Mul', [step, scale_X]),
nf.make_node('Cast', Q_Y, {'to': int(onnx.TensorProto.FLOAT)})])
o1 = nf.make_node('Mul', [nf.make_node('Mul', [step, bias_X]), qparam_rowsum])
o2 = nf.make_node('Mul', [base, nf.make_node('Mul', [scale_X, nf.make_node('Cast', Q_X_sum_int32, {'to':int(onnx.TensorProto.FLOAT)})])])
o3 = nf.make_node('Mul', [base, nf.make_node('Mul', [bias_X, np.asarray(float(input_dim)).astype(np.float32)])])
nf.make_node('Sum', [o3, o2, o1, o0], output_names=Y)
if update_qcfg_dict:
qcfg_dict[in_node.output[0]] = node_qcfg
return True
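# upgrade_op rewrites ops whose attributes became inputs in newer opsets:
# opset-10 Slice takes starts/ends/axes as input tensors, and opset-10 TopK
# takes k as an input, so the older attribute-based forms are converted here.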
def upgrade_op(nf, in_n):
if in_n.op_type == 'Slice' and len(in_n.input) == 1:
# convert opset9 Slice to opset10
with nf.scoped_prefix(in_n.name) as scoped_prefix:
slice_inputs = [in_n.input[0],
np.asarray(NodeFactory.get_attribute(in_n,'starts')).astype(np.int64),
np.asarray(NodeFactory.get_attribute(in_n,'ends')).astype(np.int64),
np.asarray(NodeFactory.get_attribute(in_n,'axes')).astype(np.int64)]
nf.make_node('Slice', slice_inputs, output_names=list(in_n.output))
return True
elif in_n.op_type == 'TopK' and len(in_n.input) == 1:
# convert opset1 TopK to opset10
with nf.scoped_prefix(in_n.name) as scoped_prefix:
topk_inputs = [in_n.input[0],
np.asarray([NodeFactory.get_attribute(in_n,'k')]).astype(np.int64)]
nf.make_node('TopK', topk_inputs, {'axis':NodeFactory.get_attribute(in_n,'axis',-1)}, output_names=list(in_n.output))
return True
else:
return False
# quantize matmul to MatMulInteger using asymm uint8
def convert_matmul_model(input_model, output_model, only_for_scan=False, share_input_quantization=False, preset_str='asymm8_param0_input1', qcfg_json=None, export_qcfg_json=None):
preset_qcfgs = {'asymm8_param0_input1' : {'W' : dict(QuantizeConfig(signed=1, reserved_bits=0, type_bits=8)),
'X' : dict(QuantizeConfig(signed=0, reserved_bits=1, type_bits=8)),
'Symmetric' : 0},
'symm16_param3_input3' : {'W' : dict(QuantizeConfig(signed=1, reserved_bits=3, type_bits=16)),
'X' : dict(QuantizeConfig(signed=1, reserved_bits=3, type_bits=16)),
'Symmetric' : 1}}
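    # A qcfg_json file maps each MatMul output name to one such entry, e.g.
    # (the node name here is hypothetical):
    #   {"lstm_MatMul_0": {"W": {"QuantizeBit": 8, "QuantizationType": "Signed", "ReservedBit": 0},
    #                      "X": {"QuantizeBit": 8, "QuantizationType": "Unsigned", "ReservedBit": 1},
    #                      "Symmetric": 0}}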
default_qcfg = preset_qcfgs[preset_str]
in_mp = onnx.load(input_model)
qcfg_dict = {}
if qcfg_json and not export_qcfg_json:
with open(qcfg_json, 'r') as f:
qcfg_dict = json.load(f)
out_mp = onnx.ModelProto()
out_mp.CopyFrom(in_mp)
out_mp.ir_version = 5 # update ir version to avoid requirement of initializer in graph input
onnx_opset_ver = ensure_opset(out_mp, 10) # bump up to ONNX opset 10, which is required for MatMulInteger
ensure_opset(out_mp, 1, 'com.microsoft') # add MS domain for MatMulInteger16
out_mp.graph.ClearField('node')
nf = NodeFactory(out_mp.graph)
converted_weights = {} # remember MatMul weights that have been converted, in case of sharing
quantized_inputs = {} if share_input_quantization else None # remember quantized inputs that might be able to share between MatMuls
for in_n in in_mp.graph.node:
if upgrade_op(nf, in_n):
continue
if in_n.op_type == 'MatMul' and not only_for_scan:
if quantize_matmul_2d_with_weight(in_n, in_mp.graph, nf, converted_weights, quantized_inputs, qcfg_dict, export_qcfg_json, default_qcfg, onnx_opset_ver):
continue
out_n = out_mp.graph.node.add()
out_n.CopyFrom(in_n)
if in_n.op_type == 'Scan' or in_n.op_type == 'Loop':
in_subgraph = NodeFactory.get_attribute(in_n, 'body')
out_subgraph = NodeFactory.get_attribute(out_n, 'body')
out_subgraph.ClearField('node')
scan_nf = NodeFactory(out_mp.graph, out_subgraph)
subgraph_quantized_inputs = {} if share_input_quantization else None # remember quantized inputs that might be able to share between MatMuls
for in_sn in in_subgraph.node:
if in_sn.op_type == 'MatMul':
if quantize_matmul_2d_with_weight(in_sn, in_subgraph, scan_nf, converted_weights, subgraph_quantized_inputs, qcfg_dict, export_qcfg_json, default_qcfg, onnx_opset_ver):
continue
if upgrade_op(scan_nf, in_sn):
continue
out_sn = out_subgraph.node.add()
out_sn.CopyFrom(in_sn)
onnx.save(out_mp, output_model)
if export_qcfg_json:
with open(qcfg_json, 'w') as f:
f.write(json.dumps(qcfg_dict, indent=2))
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, help='The input model file')
parser.add_argument('--output', required=True, help='The output model file')
parser.add_argument('--default_qcfg', help='The preset of quantization of <asymm|symm><qbits>_param<reserve_bit>_input<reserve_bit>', choices=['asymm8_param0_input1', 'symm16_param3_input3'], default='asymm8_param0_input1')
parser.add_argument('--qcfg_json', help='The quantization config json file for read or write.', default=None)
parser.add_argument('--export_qcfg_json', help='If set, write default quantization config to qcfg_json file.', action='store_true', default=False)
parser.add_argument('--only_for_scan', help='If set, apply quantization of MatMul only inside scan', action='store_true', default=False)
parser.add_argument('--share_input_quantization', help='If set, allow input quantization to be shared if the same input is used in multiple MatMul', action='store_true', default=False)
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
print('input model: ' + args.input)
print('output model: ' + args.output)
print('Quantize MatMul to MatMulInteger...')
assert not args.export_qcfg_json or args.qcfg_json, "--qcfg_json must be specified when --export_qcfg_json is used"
convert_matmul_model(args.input, args.output, args.only_for_scan, args.share_input_quantization, args.default_qcfg, args.qcfg_json, args.export_qcfg_json)
print('Done!')
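
# --- Hedged usage sketch (editorial addition, not part of the original tool) --
# Typical invocation, using only the flags defined above; the module path and
# file names are placeholders:
#   python -m <this_module> --input model.onnx --output model.quant.onnx \
#       --default_qcfg symm16_param3_input3 --qcfg_json q.json --export_qcfg_json
# or, equivalently, from Python:
#   convert_matmul_model('model.onnx', 'model.quant.onnx',
#                        only_for_scan=False, share_input_quantization=False,
#                        preset_str='symm16_param3_input3',
#                        qcfg_json='q.json', export_qcfg_json=True)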
# ==== test/functional/wallet_importmulti.py (repo arnoldcho/superman, MIT) ====
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.script import (
CScript,
OP_NOP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import (
get_key,
get_multisig,
test_address,
)
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
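
    # --- Hedged illustration (editorial, not part of the original test) ------
    # Shape of a typical importmulti request/response exercised by the helper
    # above; the concrete values here are placeholders:
    #   request : [{"scriptPubKey": {"address": "mq7se9wy..."}, "timestamp": "now"}]
    #   success : [{"success": True, "warnings": [...]}]
    #   failure : [{"success": False,
    #               "error": {"code": -5, "message": "Invalid address ..."}}]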
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info("Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Unsuccessful labelling for internal addresses"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info("Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info("Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
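
        # Hedged note (editorial): in listunspent output, "solvable" means the
        # wallet knows how to construct the spending script (it has the redeem
        # script and pubkeys), while "spendable" additionally requires the
        # private keys; a bare watch-only import, as above, is neither.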
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.nodes[1].syncwithvalidationinterfacequeue()
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Import P2WPKH address as watch only
self.log.info("Should import a P2WPKH address as watch only")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=True,
solvable=False)
# Import P2WPKH address with public key but no private key
self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2wpkh_addr,
ismine=False,
solvable=True)
# Import P2WPKH address with key and check it is spendable
self.log.info("Should import a P2WPKH address with key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=False,
ismine=True)
# P2WSH multisig address without scripts or keys
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=False)
# Same P2WSH multisig address as above, but now with witnessscript + private keys
self.log.info("Should import a p2wsh with respective witness script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now",
"witnessscript": multisig.redeem_script,
"keys": multisig.privkeys},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=True,
ismine=True,
sigsrequired=2)
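
        # Hedged note (editorial): the spending script is passed as
        # "redeemscript" for P2SH, as "witnessscript" for P2WSH, and for
        # P2SH-P2WSH both are needed (redeemscript = the P2WSH script,
        # witnessscript = the inner multisig), as the cases below exercise.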
# P2SH-P2WPKH address with no redeemscript or public or private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=False,
ismine=False)
# P2SH-P2WPKH address + redeemscript + public key with no private key
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=False)
# P2SH-P2WPKH address + redeemscript + private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True)
# P2SH-P2WSH multisig + redeemscript with no private key
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
"timestamp": "now",
"redeemscript": multisig.p2wsh_script,
"witnessscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_p2wsh_addr,
solvable=True,
ismine=False)
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now",
"label": "Unsuccessful P2SH-P2WPKH descriptor import",
"keys": [key.privkey]},
success=False,
error_code=-5,
error_message="Missing checksum")
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
p2sh_p2wpkh_label = "Successful P2SH-P2WPKH descriptor import"
self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"label": p2sh_p2wpkh_label,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True,
labels=[p2sh_p2wpkh_label])
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info("Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info("Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
for address in addresses:
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
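
        # Hedged summary (editorial) of the checks above: "range" may be a
        # single int n (interpreted as [0, n]) or a pair [begin, end]; bounds
        # must be >= 0 with begin <= end, the end must fit the key index
        # space, and the span may cover at most 1,000,000 keys.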
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
p2pkh_label = "P2PKH descriptor import"
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": p2pkh_label},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info("Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info("Addresses should have hd keypath and master key id after import with key origin")
        pub_addr = self.nodes[1].getnewaddress(address_type="bech32")
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
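
        # Hedged illustration (editorial): the descriptor assembled above has
        # the shape wpkh([d34db33f/0'/0'/5']02a1b2c3...), i.e. a key-origin
        # prefix of master-key fingerprint plus BIP32 path, then the pubkey;
        # the fingerprint, path, and key shown here are made up.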
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress(address_type="bech32")
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress(address_type="bech32")
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress(address_type="bech32")
assert_equal(addr2, newaddr2)
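
        # Hedged note (editorial): keys imported with 'keypool': True are
        # handed back out by getnewaddress in insertion order, which the two
        # assert_equal checks above (and the internal-keypool variant below)
        # rely on.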
# Import some public keys to the internal keypool of a no privkey wallet
self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress(address_type="bech32")
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress(address_type="bech32")
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv', # m/0'/0'/0
'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8', # m/0'/0'/1
'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu', # m/0'/0'/2
'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640', # m/0'/0'/3
'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range' : [0, 4],
}]
)
for i in range(0, 5):
addr = wrpc.getnewaddress('', 'bech32')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
self.log.info("Should import a P2WPKH address as watch only")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=True,
solvable=False)
self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2wpkh_addr,
ismine=False,
solvable=True)
self.log.info("Should import a P2WPKH address with key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=False,
ismine=True)
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=False)
self.log.info("Should import a p2wsh with respective witness script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now",
"witnessscript": multisig.redeem_script,
"keys": multisig.privkeys},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=True,
ismine=True,
sigsrequired=2)
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=False,
ismine=False)
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=False)
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True)
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
"timestamp": "now",
"redeemscript": multisig.p2wsh_script,
"witnessscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_p2wsh_addr,
solvable=True,
ismine=False)
key = get_key(self.nodes[0])
self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now",
"label": "Unsuccessful P2SH-P2WPKH descriptor import",
"keys": [key.privkey]},
success=False,
error_code=-5,
error_message="Missing checksum")
key = get_key(self.nodes[0])
p2sh_p2wpkh_label = "Successful P2SH-P2WPKH descriptor import"
self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"label": p2sh_p2wpkh_label,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True,
labels=[p2sh_p2wpkh_label])
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"]
addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"]
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info("Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info("Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
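        # A bare integer N for "range" is shorthand for [0, N], so indexes 0 and 1 are
        # imported; the loop below checks each index in both its P2SH and bech32 form.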
for address in addresses:
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
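        # Python precedence quirk: 2 << 31 + 1 parses as 2 << 32 (addition binds
        # tighter than shift), so both bounds below sit far past the allowed range end.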
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
p2pkh_label = "P2PKH descriptor import"
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": p2pkh_label},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
labels=[p2pkh_label])
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info("Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info("Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress(address_type="bech32")
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress(address_type="bech32")
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress(address_type="bech32")
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress(address_type="bech32")
assert_equal(addr2, newaddr2)
self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress(address_type="bech32")
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress(address_type="bech32")
assert_equal(addr2, newaddr2)
self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress(address_type="bech32")
addr2 = self.nodes[0].getnewaddress(address_type="bech32")
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv', # m/0'/0'/0
'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8', # m/0'/0'/1
'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu', # m/0'/0'/2
'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640', # m/0'/0'/3
'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
'range' : [0, 4],
}]
)
for i in range(0, 5):
addr = wrpc.getnewaddress('', 'bech32')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
# File: ALGO/graph/dp__floyd_warshall__all_pair_shortest_path.py | repo: jainrocky/LORD | license: MIT
import os, sys
inf = 2**31
def all_pair_shortest_path(matrix, printing=None):
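    # Classic Floyd-Warshall DP: allow paths through each intermediate vertex `mid`
    # in turn, relaxing every start->end distance; p_cache records the intermediate
    # vertex used so the path can be reconstructed afterwards.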
n = len(matrix)
p_cache = [[None] * n for i in range(n)]
for mid in range(n):
for start in range(n):
for end in range(n):
                temp = matrix[start][mid] + matrix[mid][end]
                if matrix[start][mid] != inf and matrix[mid][end] != inf and temp < matrix[start][end]:
matrix[start][end] = temp
p_cache[start][end] = mid
if printing:
def print_path(start, end):
print(end, end='')
if matrix[start][end] != inf:
print(' < ', end='')
else:
print(' Not Exist ', end='')
            if p_cache[start][end] is not None:
                print_path(start, p_cache[start][end])
            else:
                print(start)
for i in range(n):
for j in range(n):
print_path(i, j)
return matrix
if __name__=='__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'DS', 'graph'))
from graph import Graph
g = Graph(n_node=4)
g.add_edge(Graph.Edge( 0, 1, 5 ))
g.add_edge(Graph.Edge( 0, 3, 10 ))
g.add_edge(Graph.Edge( 1, 2, 3 ))
g.add_edge(Graph.Edge( 2, 3, 1 ))
cost_matrix = all_pair_shortest_path(g.matrix, printing=True)
print()
for c in cost_matrix:
print(*c)
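    # For this graph the 0->3 distance should come out as 9 via 0->1->2->3
    # (5 + 3 + 1), beating the direct 0->3 edge of weight 10 (assuming Graph
    # exposes an adjacency matrix with `inf` marking absent edges).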
# File: tests/test_pf_pixfmt_to_bpc.py | repo: rtmigo/vtcff_py | license: MIT
# SPDX-FileCopyrightText: (c) 2021 Artёm IG <github.com/rtmigo>
# SPDX-License-Identifier: MIT
import unittest
from vtcff._pf_15_pixfmt_subsampling import _three_digits
from vtcff._pf_20_pixfmt_bpc import _guess_bpc, _ending, _subsampling_factor
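# What the helpers appear to compute, judging from the expectations below:
# _guess_bpc(pixfmt, planes, bits_per_pixel) recovers bits per component
# (e.g. 'yuva444p12be' with 4 planes and 48 bpp -> 12), _three_digits pulls the
# chroma-subsampling digits, _ending the trailing bit depth, and
# _subsampling_factor the average per-plane sample ratio.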
class TestGuess(unittest.TestCase):
def test(self):
self.assertEqual(_guess_bpc('yuva444p12be', 4, 48), 12)
self.assertEqual(_guess_bpc('yuvj444p', 3, 24), 8)
self.assertEqual(_guess_bpc('yuva422p12be', 4, 36), 12)
self.assertEqual(_guess_bpc('yuv420p16le', 3, 24), 16)
self.assertEqual(_guess_bpc('yuv420p', 3, 12), 8)
self.assertEqual(_guess_bpc('yuv410p', 3, 9), 8)
self.assertEqual(_guess_bpc('yuv411p', 3, 12), 8)
self.assertEqual(_guess_bpc('yuv444p10be', 3, 30), 10)
self.assertEqual(_guess_bpc('yuv440p10be', 3, 20), 10)
self.assertEqual(_guess_bpc('gbrap12be', 4, 48), 12)
self.assertEqual(_guess_bpc('gbrapf32be', 4, 128), 32)
self.assertEqual(_guess_bpc('rgb24', 3, 24), 8)
self.assertEqual(_guess_bpc('gbrp10be', 3, 30), 10)
class TestThreeDigits(unittest.TestCase):
def test(self):
self.assertEqual(_three_digits('yuva444p12be'), '444')
self.assertEqual(_three_digits('yuv420'), '420')
self.assertEqual(_three_digits('yuv456'), None)
self.assertEqual(_three_digits('yub4200'), None)
class TestEnding(unittest.TestCase):
def test(self):
self.assertEqual(_ending('x2rgb10be'), 10)
self.assertEqual(_ending('gray9be'), 9)
self.assertEqual(_ending('yuva420p9be'), 9)
self.assertEqual(_ending('rgb'), None)
class TestSsFactor(unittest.TestCase):
def test(self):
self.assertEqual(_subsampling_factor('xyz444p', 3), 1 / 1)
self.assertEqual(_subsampling_factor('xyz422p', 3), 2 / 3)
self.assertEqual(_subsampling_factor('xyz440p', 3), 2 / 3)
self.assertEqual(_subsampling_factor('xyz420p', 3), 1 / 2)
self.assertEqual(_subsampling_factor('xyz444p', 4), 1 / 1)
self.assertEqual(_subsampling_factor('xyz422p', 4), 3 / 4)
self.assertEqual(_subsampling_factor('xyz420p', 4), 5 / 8)
# File: features/generator_features.py | repo: aolabNeuro/brain-python-interface | license: Apache-2.0
'''
Features which add task-like functionality (automatic trial starts, randomized
delays, hold timing) on top of task classes.
'''
import time
import tempfile
import random
import traceback
import numpy as np
import fnmatch
import os
import subprocess
from riglib.experiment import traits
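# These classes are mixins: a task class inherits from one of them (alongside its
# base experiment class) and picks up the wait/hold/delay behavior through the
# cooperative super() calls below.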
class Autostart(traits.HasTraits):
'''
Automatically begins the trial from the wait state,
with a random interval drawn from `rand_start`. Doesn't really
work if there are multiple trials in between wait states.
'''
rand_start = traits.Tuple((0., 0.), desc="Start interval")
exclude_parent_traits = ['wait_time']
def _start_wait(self):
'''
At the start of the 'wait' state, determine how long to wait before starting the trial
by drawing a sample from the rand_start interval
'''
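        # Uniform draw on [s, e): random.random() returns [0, 1), scaled to the interval.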
s, e = self.rand_start
self.wait_time = random.random()*(e-s) + s
super(Autostart, self)._start_wait()
def _test_start_trial(self, ts):
'''
Test if the required random wait time has passed
'''
return ts > self.wait_time and not self.pause
class AdaptiveGenerator(object):
'''
Deprecated--this class appears to be unused
'''
def __init__(self, *args, **kwargs):
super(AdaptiveGenerator, self).__init__(*args, **kwargs)
assert hasattr(self.gen, "correct"), "Must use adaptive generator!"
def _start_reward(self):
self.gen.correct()
super(AdaptiveGenerator, self)._start_reward()
def _start_incorrect(self):
self.gen.incorrect()
super(AdaptiveGenerator, self)._start_incorrect()
class IgnoreCorrectness(object):
'''Deprecated--this class appears to be unused and not compatible with Sequences
Allows any response to be correct, not just the one defined. Overrides for trialtypes'''
def __init__(self, *args, **kwargs):
super(IgnoreCorrectness, self).__init__(*args, **kwargs)
if hasattr(self, "trial_types"):
for ttype in self.trial_types:
del self.status[ttype]["%s_correct"%ttype]
del self.status[ttype]["%s_incorrect"%ttype]
self.status[ttype]["correct"] = "reward"
self.status[ttype]["incorrect"] = "penalty"
def _test_correct(self, ts):
return self.event is not None
def _test_incorrect(self, ts):
return False
class MultiHoldTime(traits.HasTraits):
'''
Deprecated--Use RandomDelay instead.
Allows the hold time parameter to be multiple values per target in a given sequence chain. For instance,
center targets and peripheral targets can have different hold times.
'''
hold_time = traits.List([.2,], desc="Length of hold required at targets before next target appears. \
Can be a single number or a list of numbers to apply to each target in the sequence (center, out, etc.)")
def _test_hold_complete(self, time_in_state):
'''
Test whether the target is held long enough to declare the
trial a success
Possible options
        - Target held for the minimum required time (implemented here)
- Sensorized object moved by a certain amount
- Sensorized object moved to the required location
- Manually triggered by experimenter
'''
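        # With hold_time=[0.2, 0.5], the first target must be held 0.2 s and the
        # second 0.5 s; a single-element list applies the same hold to every target.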
if len(self.hold_time) == 1:
hold_time = self.hold_time[0]
else:
hold_time = self.hold_time[self.target_index]
return time_in_state > hold_time
class RandomDelay(traits.HasTraits):
'''
Replaces 'delay_time' with 'rand_delay', an interval on which the delay period is selected uniformly.
'''
rand_delay = traits.Tuple((0., 0.), desc="Delay interval")
exclude_parent_traits = ['delay_time']
def _start_wait(self):
'''
At the start of the 'wait' state, draw a sample from the rand_delay interval for this trial.
'''
s, e = self.rand_delay
self.delay_time = random.random()*(e-s) + s
super()._start_wait()
class TransparentDelayTarget(traits.HasTraits):
'''
Feature to make the delay period show a semi-transparent target rather than the full target. Used
for training the go cue. Gradually increase the alpha from 0 to 0.75 once a long enough delay
period has been established.
'''
delay_target_alpha = traits.Float(0.25, desc="Transparency of the next target during delay periods")
def _start_delay(self):
super()._start_delay()
# Set the alpha of the next target
next_idx = (self.target_index + 1)
if next_idx < self.chain_length:
target = self.targets[next_idx % 2]
self._old_target_color = np.copy(target.sphere.color)
new_target_color = list(target.sphere.color)
new_target_color[3] = self.delay_target_alpha
target.sphere.color = tuple(new_target_color)
def _start_target(self):
super()._start_target()
# Reset the transparency of the current target
if self.target_index > 0:
target = self.targets[self.target_index % 2]
target.sphere.color = self._old_target_color
# File: runtests.py | repo: ombu/django-sortedm2m | license: BSD-3-Clause
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys, warnings
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
import django
from django.core.management import execute_from_command_line
if django.VERSION < (1, 6):
default_test_apps = [
'sortedm2m_tests',
'test_south_support',
]
else:
default_test_apps = [
'sortedm2m_tests',
]
# Only test south support for Django 1.6 and lower.
if django.VERSION < (1, 7):
default_test_apps += [
'test_south_support',
]
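# South (the pre-1.7 migrations app) is only exercised on Django < 1.7; from 1.7
# on, Django ships built-in migrations and the south test app is skipped.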
def runtests(*args):
if django.VERSION > (1, 8):
warnings.simplefilter("error", Warning)
warnings.filterwarnings("ignore", module="distutils")
try:
warnings.filterwarnings("ignore", category=ResourceWarning)
except NameError:
pass
warnings.filterwarnings("ignore", "invalid escape sequence", DeprecationWarning)
# Ignore a python 3.6 DeprecationWarning in ModelBase.__new__ that isn't
# fixed in Django 1.x
if sys.version_info > (3, 6) and django.VERSION < (2,):
warnings.filterwarnings(
"ignore", "__class__ not set defining", DeprecationWarning)
test_apps = list(args or default_test_apps)
execute_from_command_line([sys.argv[0], 'test', '--verbosity=1'] + test_apps)
if __name__ == '__main__':
runtests(*sys.argv[1:])
# File: data/scripts/templates/object/tangible/wearables/armor/ithorian_guardian/shared_ith_armor_s02_chest_plate.py | repo: obi-two/GameServer | license: MIT
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/armor/ithorian_guardian/shared_ith_armor_s02_chest_plate.iff"
result.attribute_template_id = 0
result.stfName("wearables_name","ith_armor_s02_chest_plate")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
# File: model/gru.py | repo: susht3/Text_Mutil_Classification_keras | license: MIT
from keras.layers import Input, Dense, Dropout, Flatten, merge, Reshape, Embedding
from keras.models import Model
from keras.layers import LSTM,TimeDistributed,Bidirectional, Highway,Activation,Convolution1D, MaxPooling1D,GRU,AveragePooling1D
#SimpleDeepRNN
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization
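# NOTE: this file targets the Keras 1.x API (functional merge(), border_mode=,
# BatchNormalization(mode=...), Convolution1D); Keras 2 renamed or removed these.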
def build_cnn_bgru(input_set_size, width, height, mul_nb_classes):
print('cnn-lstm model building...')
inputs = Input(shape=(height,),dtype='int32')
embedd = Embedding(input_set_size, width, input_length=height)(inputs)
# conv
conv1_1 = Convolution1D(64, 3, border_mode='same', activation='relu')(embedd)
bn1 = BatchNormalization(mode=1)(conv1_1)
pool1 = MaxPooling1D(pool_length=2)(bn1)
drop1 = Dropout(0.2)(pool1)
# 2 conv
conv2_1 = Convolution1D(128, 3, border_mode='same', activation='relu')(drop1)
bn2 = BatchNormalization(mode=1)(conv2_1)
pool2 = MaxPooling1D(pool_length=2)(bn2)
drop2 = Dropout(0.2)(pool2)
'''
# 3 conv
conv3_1 = Convolution1D(160, 2, border_mode='same', activation='relu')(drop2)
bn3 = BatchNormalization(mode=1)(conv3_1)
#pool3 = MaxPooling1D(pool_length=2)(bn3)
drop3 = Dropout(0.2)(bn3)
'''
#b = merge([bn4, drop3], mode='concat')
blstm = Bidirectional(LSTM(256,return_sequences=False), merge_mode='sum')(drop2)
#gru = AveragePooling1D(pool_length=2)(bgru)
drop = Dropout(0.5)(blstm)
# output
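    # Cascaded multi-label heads: each head's sigmoid output is concatenated with
    # the shared BLSTM features to condition the next head's predictions.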
out1 = Dense(mul_nb_classes[0], activation='sigmoid')(drop)
merged1 = merge([out1, drop], mode='concat')
out2 = Dense(mul_nb_classes[1], activation='sigmoid')(merged1)
merged2 = merge([out2, drop], mode='concat')
out3 = Dense(mul_nb_classes[2], activation='sigmoid')(merged2)
merged3 = merge([out3, drop], mode='concat')
out4 = Dense(mul_nb_classes[3], activation='sigmoid')(merged3)
out = [out1, out2, out3, out4]
model = Model(input=[inputs], output=out)
model.summary()
#sgd = SGD(lr=0.01, momentum=0.9, decay=0.1, nesterov=False)
model.compile(loss='binary_crossentropy',
#optimizer = 'sgd',
optimizer='adam',
metrics=['accuracy'],
)
print("cnn-lstm model has built.")
return model