content
stringlengths
27
928k
path
stringlengths
4
230
size
int64
27
928k
nl_text
stringlengths
21
396k
nl_size
int64
21
396k
nl_language
stringlengths
2
3
nl_language_score
float64
0.04
1
# Generated by Django 3.0.6 on 2020-05-28 17:21 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Action', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(help_text='Provide name of your service', max_length=50)), ('description', models.CharField(help_text='Provide short decription', max_length=500)), ('bootstrap_icon', models.CharField(help_text='Enter bootstrap icon here', max_length=500)), ('link', models.CharField(help_text='Provide link to an action', max_length=100)), ], ), migrations.CreateModel( name='Task', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(help_text='Enter task title', max_length=50)), ('description', models.CharField(help_text='Enter task description', max_length=500)), ('bootstrap_icon', models.CharField(help_text='Enter bootstrap icon here', max_length=500)), ('link', models.CharField(help_text='Путь к настройке задания', max_length=100)), ], ), migrations.CreateModel( name='ViberToken', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('data', models.CharField(max_length=100)), ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='TelegramToken', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('data', models.CharField(max_length=100)), ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), 
migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('telegram_tokens', models.ManyToManyField(to='botapp.TelegramToken')), ('viber_tokens', models.ManyToManyField(to='botapp.ViberToken')), ], ), ]
projectamber/botapp/migrations/0001_initial.py
3,115
Generated by Django 3.0.6 on 2020-05-28 17:21
45
en
0.646485
"""Tests for the HTTP API Client.""" import pytest import solana.system_program as sp from solana.rpc.api import DataSliceOpt, Client from solana.keypair import Keypair from solana.rpc.core import RPCException from solana.rpc.types import RPCError from solana.transaction import Transaction from solana.rpc.commitment import Finalized from spl.token.constants import WRAPPED_SOL_MINT from .utils import AIRDROP_AMOUNT, assert_valid_response @pytest.mark.integration def test_request_air_drop(stubbed_sender: Keypair, test_http_client: Client): """Test air drop to stubbed_sender.""" resp = test_http_client.request_airdrop(stubbed_sender.public_key, AIRDROP_AMOUNT) assert_valid_response(resp) test_http_client.confirm_transaction(resp["result"]) balance = test_http_client.get_balance(stubbed_sender.public_key) assert balance["result"]["value"] == AIRDROP_AMOUNT @pytest.mark.integration def test_request_air_drop_prefetched_blockhash(stubbed_sender_prefetched_blockhash, test_http_client): """Test air drop to stubbed_sender.""" resp = test_http_client.request_airdrop(stubbed_sender_prefetched_blockhash.public_key, AIRDROP_AMOUNT) assert_valid_response(resp) test_http_client.confirm_transaction(resp["result"]) balance = test_http_client.get_balance(stubbed_sender_prefetched_blockhash.public_key) assert balance["result"]["value"] == AIRDROP_AMOUNT @pytest.mark.integration def test_request_air_drop_cached_blockhash(stubbed_sender_cached_blockhash, test_http_client): """Test air drop to stubbed_sender.""" resp = test_http_client.request_airdrop(stubbed_sender_cached_blockhash.public_key, AIRDROP_AMOUNT) assert_valid_response(resp) test_http_client.confirm_transaction(resp["result"]) assert_valid_response(resp) balance = test_http_client.get_balance(stubbed_sender_cached_blockhash.public_key) assert balance["result"]["value"] == AIRDROP_AMOUNT @pytest.mark.integration def test_send_invalid_transaction(test_http_client): """Test sending an invalid transaction to localnet.""" # 
Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver with pytest.raises(RPCException) as exc_info: test_http_client.send_raw_transaction(b"foo") assert exc_info.value.args[0].keys() == RPCError.__annotations__.keys() # pylint: disable=no-member @pytest.mark.integration def test_send_transaction_and_get_balance(stubbed_sender, stubbed_receiver, test_http_client): """Test sending a transaction to localnet.""" # Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver transfer_tx = Transaction().add( sp.transfer(sp.TransferParams(from_pubkey=stubbed_sender.public_key, to_pubkey=stubbed_receiver, lamports=1000)) ) resp = test_http_client.send_transaction(transfer_tx, stubbed_sender) assert_valid_response(resp) # Confirm transaction test_http_client.confirm_transaction(resp["result"]) # Check balances resp = test_http_client.get_balance(stubbed_sender.public_key) assert_valid_response(resp) assert resp["result"]["value"] == 9999994000 resp = test_http_client.get_balance(stubbed_receiver) assert_valid_response(resp) assert resp["result"]["value"] == 954 @pytest.mark.integration def test_send_transaction_prefetched_blockhash( stubbed_sender_prefetched_blockhash, stubbed_receiver_prefetched_blockhash, test_http_client ): """Test sending a transaction to localnet.""" # Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver transfer_tx = Transaction().add( sp.transfer( sp.TransferParams( from_pubkey=stubbed_sender_prefetched_blockhash.public_key, to_pubkey=stubbed_receiver_prefetched_blockhash, lamports=1000, ) ) ) recent_blockhash = test_http_client.parse_recent_blockhash(test_http_client.get_recent_blockhash(Finalized)) resp = test_http_client.send_transaction( transfer_tx, stubbed_sender_prefetched_blockhash, recent_blockhash=recent_blockhash ) assert_valid_response(resp) # Confirm transaction test_http_client.confirm_transaction(resp["result"]) # Check balances resp = 
test_http_client.get_balance(stubbed_sender_prefetched_blockhash.public_key) assert_valid_response(resp) assert resp["result"]["value"] == 9999994000 resp = test_http_client.get_balance(stubbed_receiver_prefetched_blockhash) assert_valid_response(resp) assert resp["result"]["value"] == 954 @pytest.mark.integration def test_send_transaction_cached_blockhash( stubbed_sender_cached_blockhash, stubbed_receiver_cached_blockhash, test_http_client_cached_blockhash ): """Test sending a transaction to localnet.""" # Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver transfer_tx = Transaction().add( sp.transfer( sp.TransferParams( from_pubkey=stubbed_sender_cached_blockhash.public_key, to_pubkey=stubbed_receiver_cached_blockhash, lamports=1000, ) ) ) assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) == 0 assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) == 0 resp = test_http_client_cached_blockhash.send_transaction(transfer_tx, stubbed_sender_cached_blockhash) # we could have got a new blockhash or not depending on network latency and luck assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) in (0, 1) assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) == 1 assert_valid_response(resp) # Confirm transaction test_http_client_cached_blockhash.confirm_transaction(resp["result"]) # Check balances resp = test_http_client_cached_blockhash.get_balance(stubbed_sender_cached_blockhash.public_key) assert_valid_response(resp) assert resp["result"]["value"] == 9999994000 # Second transaction transfer_tx = Transaction().add( sp.transfer( sp.TransferParams( from_pubkey=stubbed_sender_cached_blockhash.public_key, to_pubkey=stubbed_receiver_cached_blockhash, lamports=2000, ) ) ) resp = test_http_client_cached_blockhash.get_balance(stubbed_receiver_cached_blockhash) assert_valid_response(resp) assert resp["result"]["value"] == 954 resp = 
test_http_client_cached_blockhash.send_transaction(transfer_tx, stubbed_sender_cached_blockhash) # we could have got a new blockhash or not depending on network latency and luck assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) in (0, 1) assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) in (1, 2) assert_valid_response(resp) # Confirm transaction test_http_client_cached_blockhash.confirm_transaction(resp["result"]) # Check balances resp = test_http_client_cached_blockhash.get_balance(stubbed_sender_cached_blockhash.public_key) assert_valid_response(resp) assert resp["result"]["value"] == 9999987000 assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) == 1 assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) == 1 @pytest.mark.integration def test_send_raw_transaction_and_get_balance(stubbed_sender, stubbed_receiver, test_http_client): """Test sending a raw transaction to localnet.""" # Get a recent blockhash resp = test_http_client.get_recent_blockhash(Finalized) assert_valid_response(resp) recent_blockhash = resp["result"]["value"]["blockhash"] # Create transfer tx transfer lamports from stubbed sender to stubbed_receiver transfer_tx = Transaction(recent_blockhash=recent_blockhash).add( sp.transfer(sp.TransferParams(from_pubkey=stubbed_sender.public_key, to_pubkey=stubbed_receiver, lamports=1000)) ) # Sign transaction transfer_tx.sign(stubbed_sender) # Send raw transaction resp = test_http_client.send_raw_transaction(transfer_tx.serialize()) assert_valid_response(resp) # Confirm transaction test_http_client.confirm_transaction(resp["result"]) # Check balances resp = test_http_client.get_balance(stubbed_sender.public_key) assert_valid_response(resp) assert resp["result"]["value"] == 9999988000 resp = test_http_client.get_balance(stubbed_receiver) assert_valid_response(resp) assert resp["result"]["value"] == 1954 @pytest.mark.integration def 
test_confirm_bad_signature(test_http_client: Client) -> None: """Test that RPCException is raised when trying to confirm an invalid signature.""" with pytest.raises(RPCException) as exc_info: test_http_client.confirm_transaction("foo") err_object = exc_info.value.args[0] assert err_object == {"code": -32602, "message": "Invalid param: WrongSize"} @pytest.mark.integration def test_get_block_commitment(test_http_client): """Test get block commitment.""" resp = test_http_client.get_block_commitment(5) assert_valid_response(resp) @pytest.mark.integration def test_get_block_time(test_http_client): """Test get block time.""" resp = test_http_client.get_block_time(5) assert_valid_response(resp) @pytest.mark.integration def test_get_cluster_nodes(test_http_client): """Test get cluster nodes.""" resp = test_http_client.get_cluster_nodes() assert_valid_response(resp) @pytest.mark.integration def test_get_confirmed_block(test_http_client): """Test get confirmed block.""" resp = test_http_client.get_confirmed_block(1) assert_valid_response(resp) @pytest.mark.integration def test_get_confirmed_block_with_encoding(test_http_client): """Test get confrimed block with encoding.""" resp = test_http_client.get_confirmed_block(1, encoding="base64") assert_valid_response(resp) @pytest.mark.integration def test_get_block(test_http_client): """Test get block.""" resp = test_http_client.get_block(1) assert_valid_response(resp) @pytest.mark.integration def test_get_block_with_encoding(test_http_client): """Test get block with encoding.""" resp = test_http_client.get_block(1, encoding="base64") assert_valid_response(resp) @pytest.mark.integration def test_get_block_height(test_http_client): """Test get height.""" resp = test_http_client.get_block_height() assert_valid_response(resp) @pytest.mark.integration def test_get_confirmed_blocks(test_http_client): """Test get confirmed blocks.""" resp = test_http_client.get_confirmed_blocks(5, 10) assert_valid_response(resp) @pytest.mark.integration 
def test_get_blocks(test_http_client): """Test get blocks.""" resp = test_http_client.get_blocks(5, 10) assert_valid_response(resp) @pytest.mark.integration def test_get_confirmed_signature_for_address2(test_http_client): """Test get confirmed signature for address2.""" resp = test_http_client.get_confirmed_signature_for_address2("Vote111111111111111111111111111111111111111", limit=1) assert_valid_response(resp) @pytest.mark.integration def test_get_signatures_for_address(test_http_client): """Test get signatures for addresses.""" resp = test_http_client.get_signatures_for_address("Vote111111111111111111111111111111111111111", limit=1) assert_valid_response(resp) @pytest.mark.integration def test_get_epoch_info(test_http_client): """Test get epoch info.""" resp = test_http_client.get_epoch_info() assert_valid_response(resp) @pytest.mark.integration def test_get_epoch_schedule(test_http_client): """Test get epoch schedule.""" resp = test_http_client.get_epoch_schedule() assert_valid_response(resp) @pytest.mark.integration def test_get_fee_calculator_for_blockhash(test_http_client): """Test get fee calculator for blockhash.""" resp = test_http_client.get_recent_blockhash(Finalized) assert_valid_response(resp) resp = test_http_client.get_fee_calculator_for_blockhash(resp["result"]["value"]["blockhash"]) assert_valid_response(resp) @pytest.mark.integration def test_get_slot(test_http_client): """Test get slot.""" resp = test_http_client.get_slot() assert_valid_response(resp) @pytest.mark.integration def test_get_fees(test_http_client): """Test get fees.""" resp = test_http_client.get_fees() assert_valid_response(resp) @pytest.mark.integration def test_get_first_available_block(test_http_client): """Test get first available block.""" resp = test_http_client.get_first_available_block() assert_valid_response(resp) @pytest.mark.integration def test_get_genesis_hash(test_http_client): """Test get genesis hash.""" resp = test_http_client.get_genesis_hash() 
assert_valid_response(resp) @pytest.mark.integration def test_get_identity(test_http_client): """Test get identity.""" resp = test_http_client.get_genesis_hash() assert_valid_response(resp) @pytest.mark.integration def test_get_inflation_governor(test_http_client): """Test get inflation governor.""" resp = test_http_client.get_inflation_governor() assert_valid_response(resp) @pytest.mark.integration def test_get_inflation_rate(test_http_client): """Test get inflation rate.""" resp = test_http_client.get_inflation_rate() assert_valid_response(resp) @pytest.mark.integration def test_get_largest_accounts(test_http_client): """Test get largest accounts.""" resp = test_http_client.get_largest_accounts() assert_valid_response(resp) @pytest.mark.integration def test_get_leader_schedule(test_http_client): """Test get leader schedule.""" resp = test_http_client.get_leader_schedule() assert_valid_response(resp) @pytest.mark.integration def test_get_minimum_balance_for_rent_exemption(test_http_client): """Test get minimum balance for rent exemption.""" resp = test_http_client.get_minimum_balance_for_rent_exemption(50) assert_valid_response(resp) @pytest.mark.integration def test_get_slot_leader(test_http_client): """Test get slot leader.""" resp = test_http_client.get_slot_leader() assert_valid_response(resp) @pytest.mark.integration def test_get_supply(test_http_client): """Test get slot leader.""" resp = test_http_client.get_supply() assert_valid_response(resp) @pytest.mark.integration def test_get_transaction_count(test_http_client): """Test get transactinon count.""" resp = test_http_client.get_transaction_count() assert_valid_response(resp) @pytest.mark.integration def test_get_version(test_http_client): """Test get version.""" resp = test_http_client.get_version() assert_valid_response(resp) @pytest.mark.integration def test_get_account_info(stubbed_sender, test_http_client): """Test get_account_info.""" resp = 
test_http_client.get_account_info(stubbed_sender.public_key) assert_valid_response(resp) resp = test_http_client.get_account_info(stubbed_sender.public_key, encoding="jsonParsed") assert_valid_response(resp) resp = test_http_client.get_account_info(stubbed_sender.public_key, data_slice=DataSliceOpt(1, 1)) assert_valid_response(resp) @pytest.mark.integration def test_get_multiple_accounts(stubbed_sender, test_http_client): """Test get_multiple_accounts.""" pubkeys = [stubbed_sender.public_key] * 2 resp = test_http_client.get_multiple_accounts(pubkeys) assert_valid_response(resp) resp = test_http_client.get_multiple_accounts(pubkeys, encoding="jsonParsed") assert_valid_response(resp) resp = test_http_client.get_multiple_accounts(pubkeys, data_slice=DataSliceOpt(1, 1)) assert_valid_response(resp) @pytest.mark.integration def test_get_token_largest_accounts(test_http_client): """Test get token largest accounts.""" resp = test_http_client.get_token_largest_accounts(WRAPPED_SOL_MINT) assert_valid_response(resp) @pytest.mark.integration def test_get_token_supply(test_http_client): """Test get token supply.""" resp = test_http_client.get_token_supply(WRAPPED_SOL_MINT) assert_valid_response(resp) @pytest.mark.integration def test_get_vote_accounts(test_http_client): """Test get vote accounts.""" resp = test_http_client.get_vote_accounts() assert_valid_response(resp)
tests/integration/test_http_client.py
16,558
Test that RPCException is raised when trying to confirm an invalid signature. Test get_account_info. Test get block. Test get block commitment. Test get height. Test get block time. Test get block with encoding. Test get blocks. Test get cluster nodes. Test get confirmed block. Test get confrimed block with encoding. Test get confirmed blocks. Test get confirmed signature for address2. Test get epoch info. Test get epoch schedule. Test get fee calculator for blockhash. Test get fees. Test get first available block. Test get genesis hash. Test get identity. Test get inflation governor. Test get inflation rate. Test get largest accounts. Test get leader schedule. Test get minimum balance for rent exemption. Test get_multiple_accounts. Test get signatures for addresses. Test get slot. Test get slot leader. Test get slot leader. Test get token largest accounts. Test get token supply. Test get transactinon count. Test get version. Test get vote accounts. Test air drop to stubbed_sender. Test air drop to stubbed_sender. Test air drop to stubbed_sender. Test sending an invalid transaction to localnet. Test sending a raw transaction to localnet. Test sending a transaction to localnet. Test sending a transaction to localnet. Test sending a transaction to localnet. Tests for the HTTP API Client. 
Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver pylint: disable=no-member Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver Confirm transaction Check balances Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver Confirm transaction Check balances Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver we could have got a new blockhash or not depending on network latency and luck Confirm transaction Check balances Second transaction we could have got a new blockhash or not depending on network latency and luck Confirm transaction Check balances Get a recent blockhash Create transfer tx transfer lamports from stubbed sender to stubbed_receiver Sign transaction Send raw transaction Confirm transaction Check balances
2,144
en
0.835581
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The BitCore Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This module contains utilities for doing coverage analysis on the RPC interface. It provides a way to track which RPC commands are exercised during testing. """ import os REFERENCE_FILENAME = 'rpc_interface.txt' class AuthServiceProxyWrapper(object): """ An object that wraps AuthServiceProxy to record specific RPC calls. """ def __init__(self, auth_service_proxy_instance, coverage_logfile=None): """ Kwargs: auth_service_proxy_instance (AuthServiceProxy): the instance being wrapped. coverage_logfile (str): if specified, write each service_name out to a file when called. """ self.auth_service_proxy_instance = auth_service_proxy_instance self.coverage_logfile = coverage_logfile def __getattr__(self, *args, **kwargs): return_val = self.auth_service_proxy_instance.__getattr__( *args, **kwargs) return AuthServiceProxyWrapper(return_val, self.coverage_logfile) def __call__(self, *args, **kwargs): """ Delegates to AuthServiceProxy, then writes the particular RPC method called to a file. """ return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) rpc_method = self.auth_service_proxy_instance._service_name if self.coverage_logfile: with open(self.coverage_logfile, 'a+', encoding='utf8') as f: f.write("%s\n" % rpc_method) return return_val @property def url(self): return self.auth_service_proxy_instance.url def get_filename(dirname, n_node): """ Get a filename unique to the test process ID and node. This file will contain a list of RPC commands covered. """ pid = str(os.getpid()) return os.path.join( dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node))) def write_all_rpc_commands(dirname, node): """ Write out a list of all RPC functions available in `bitcore-cli` for coverage comparison. This will only happen once per coverage directory. 
Args: dirname (str): temporary test dir node (AuthServiceProxy): client Returns: bool. if the RPC interface file was written. """ filename = os.path.join(dirname, REFERENCE_FILENAME) if os.path.isfile(filename): return False help_output = node.help().split('\n') commands = set() for line in help_output: line = line.strip() # Ignore blanks and headers if line and not line.startswith('='): commands.add("%s\n" % line.split()[0]) with open(filename, 'w', encoding='utf8') as f: f.writelines(list(commands)) return True
qa/rpc-tests/test_framework/coverage.py
2,965
An object that wraps AuthServiceProxy to record specific RPC calls. Delegates to AuthServiceProxy, then writes the particular RPC method called to a file. Kwargs: auth_service_proxy_instance (AuthServiceProxy): the instance being wrapped. coverage_logfile (str): if specified, write each service_name out to a file when called. Get a filename unique to the test process ID and node. This file will contain a list of RPC commands covered. Write out a list of all RPC functions available in `bitcore-cli` for coverage comparison. This will only happen once per coverage directory. Args: dirname (str): temporary test dir node (AuthServiceProxy): client Returns: bool. if the RPC interface file was written. This module contains utilities for doing coverage analysis on the RPC interface. It provides a way to track which RPC commands are exercised during testing. !/usr/bin/env python3 Copyright (c) 2015-2016 The BitCore Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Ignore blanks and headers
1,135
en
0.786508
#!/usr/bin/env python # coding: utf-8 # This software component is licensed by ST under BSD 3-Clause license, # the "License"; You may not use this file except in compliance with the # License. You may obtain a copy of the License at: # https://opensource.org/licenses/BSD-3-Clause """KWS Feature Extraction example.""" import numpy as np import librosa import scipy from scipy.signal import hann from scipy.fftpack import dct def mfcc_col(buff_test): window = 2048 half_window = int(window / 2) n_mels = 128 n_coeff = 13 assert buff_test.shape == (window,) hann_asym_f32 = hann(window, sym=False).astype('float32') assert hann_asym_f32.shape == (window,), hann_asym_f32.shape buff_hann = buff_test * hann_asym_f32 assert buff_hann.shape == (window,), buff_hann.shape fft = np.fft.fft(buff_hann, window)[:half_window + 1] assert fft.shape == (half_window + 1,), fft.shape ps = np.abs(fft)**2 assert ps.shape == (half_window + 1,) mel = librosa.filters.mel(sr, window, n_mels) assert mel.shape == (n_mels, half_window + 1) energy = np.dot(mel, ps) assert energy.shape == (n_mels,) logamplitude = 10 * np.log10(energy) assert logamplitude.shape == (n_mels,) dct_out = dct(logamplitude, type=3) assert dct_out.shape == (n_mels,) return(dct_out[1:(n_coeff + 1)]) # buffer_bus_01 is made of first 2048 samples of "bus.wav" file sr, ys = scipy.io.wavfile.read("bus.wav") buffer_01 = ys[0:2048] mfcc_col = mfcc_col(buffer_01) print('mfcc = ', mfcc_col[:])
SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Middlewares/ST/STM32_AI_AudioPreprocessing_Library/Python/MFCC.py
1,650
KWS Feature Extraction example. !/usr/bin/env python coding: utf-8 This software component is licensed by ST under BSD 3-Clause license, the "License"; You may not use this file except in compliance with the License. You may obtain a copy of the License at: https://opensource.org/licenses/BSD-3-Clause buffer_bus_01 is made of first 2048 samples of "bus.wav" file
394
en
0.874031
# Generated by Django 3.2.7 on 2021-09-10 14:40 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='AppState', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('play_state', models.CharField(choices=[('PLAY', 'Play'), ('STOP', 'Stop')], default='STOP', max_length=4)), ('health_state', models.CharField(choices=[('GOOD', 'Good'), ('BAD', 'Bad'), ('PEND', 'Pending')], default='PEND', max_length=4)), ], ), migrations.CreateModel( name='AssetSource', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=200)), ('type', models.CharField(choices=[('JSON', 'Json'), ('SCRP', 'Scrape')], default='JSON', max_length=4)), ('post_title', models.CharField(max_length=200)), ('url', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='LogEntry', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time_stamp', models.DateTimeField()), ('source', models.CharField(max_length=200)), ('type', models.IntegerField(choices=[(0, 'Log'), (1, 'Warn'), (2, 'Err')])), ('text', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='Asset', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=200)), ('description', models.CharField(max_length=1000)), ('link', models.CharField(max_length=1000)), ('time_stamp', models.DateTimeField()), ('sent', models.BooleanField()), ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.assetsource')), ], ), ]
bot/migrations/0001_initial.py
2,461
Generated by Django 3.2.7 on 2021-09-10 14:40
45
en
0.686191
import scipy from manimlib.imports import * from from_3b1b.old.eoc.chapter1 import Thumbnail as Chapter1Thumbnail from from_3b1b.old.eoc.chapter2 import Car, MoveCar, ShowSpeedometer, \ IncrementNumber, GraphCarTrajectory, SecantLineToTangentLine, \ VELOCITY_COLOR, TIME_COLOR, DISTANCE_COLOR def v_rate_func(t): return 4*t - 4*(t**2) def s_rate_func(t): return 3*(t**2) - 2*(t**3) def v_func(t): return t*(8-t) def s_func(t): return 4*t**2 - (t**3)/3. class Chapter8OpeningQuote(OpeningQuote, PiCreatureScene): CONFIG = { "quote" : [ " One should never try to prove anything that \\\\ is not ", "almost obvious", ". " ], "quote_arg_separator" : "", "highlighted_quote_terms" : { "almost obvious" : BLUE, }, "author" : "Alexander Grothendieck" } def construct(self): self.remove(self.pi_creature) OpeningQuote.construct(self) words_copy = self.quote.get_part_by_tex("obvious").copy() author = self.author author.save_state() formula = self.get_formula() formula.next_to(author, DOWN, MED_LARGE_BUFF) formula.to_edge(LEFT) self.revert_to_original_skipping_status() self.play(FadeIn(self.pi_creature)) self.play( author.next_to, self.pi_creature.get_corner(UP+LEFT), UP, self.pi_creature.change_mode, "raise_right_hand" ) self.wait(3) self.play( author.restore, self.pi_creature.change_mode, "plain" ) self.play( words_copy.next_to, self.pi_creature, LEFT, MED_SMALL_BUFF, UP, self.pi_creature.change_mode, "thinking" ) self.wait(2) self.play( Write(formula), self.pi_creature.change_mode, "confused" ) self.wait() def get_formula(self): result = TexMobject( "{d(\\sin(\\theta)) \\over \\,", "d\\theta}", "=", "\\lim_{", "h", " \\to 0}", "{\\sin(\\theta+", "h", ") - \\sin(\\theta) \\over", " h}", "=", "\\lim_{", "h", " \\to 0}", "{\\big[ \\sin(\\theta)\\cos(", "h", ") + ", "\\sin(", "h", ")\\cos(\\theta)\\big] - \\sin(\\theta) \\over", "h}", "= \\dots" ) result.set_color_by_tex("h", GREEN, substring = False) result.set_color_by_tex("d\\theta", GREEN) result.set_width(FRAME_WIDTH - 
2*MED_SMALL_BUFF) return result class ThisVideo(TeacherStudentsScene): def construct(self): series = VideoSeries() series.to_edge(UP) this_video = series[7] this_video.save_state() next_video = series[8] deriv, integral, v_t, dt, equals, v_T = formula = TexMobject( "\\frac{d}{dT}", "\\int_0^T", "v(t)", "\\,dt", "=", "v(T)" ) formula.set_color_by_tex("v", VELOCITY_COLOR) formula.next_to(self.teacher.get_corner(UP+LEFT), UP, MED_LARGE_BUFF) self.play(FadeIn(series, lag_ratio = 0.5)) self.play( this_video.shift, this_video.get_height()*DOWN/2, this_video.set_color, YELLOW, self.teacher.change_mode, "raise_right_hand", ) self.play(Write(VGroup(integral, v_t, dt))) self.change_student_modes(*["erm"]*3) self.wait() self.play(Write(VGroup(deriv, equals, v_T)), ) self.change_student_modes(*["confused"]*3) self.wait(3) self.play( this_video.restore, next_video.shift, next_video.get_height()*DOWN/2, next_video.set_color, YELLOW, integral[0].copy().next_to, next_video, DOWN, MED_LARGE_BUFF, FadeOut(formula), *it.chain(*[ [pi.change_mode, "plain", pi.look_at, next_video] for pi in self.pi_creatures ]) ) self.wait(2) class InCarRestrictedView(ShowSpeedometer): CONFIG = { "speedometer_title_text" : "Your view", } def construct(self): car = Car() car.move_to(self.point_A) self.car = car car.randy.save_state() Transform(car.randy, Randolph()).update(1) car.randy.next_to(car, RIGHT, MED_LARGE_BUFF) car.randy.look_at(car) window = car[1][6].copy() window.is_subpath = False window.set_fill(BLACK, opacity = 0.75) window.set_stroke(width = 0) square = Square(stroke_color = WHITE) square.replace(VGroup(self.speedometer, self.speedometer_title)) square.scale_in_place(1.5) square.pointwise_become_partial(square, 0.25, 0.75) time_label = TextMobject("Time (in seconds):", "0") time_label.shift(2*UP) dots = VGroup(*list(map(Dot, [self.point_A, self.point_B]))) line = Line(*dots, buff = 0) line.set_color(DISTANCE_COLOR) brace = Brace(line, DOWN) brace_text = brace.get_text("Distance 
traveled?") #Sit in car self.add(car) self.play(Blink(car.randy)) self.play(car.randy.restore, Animation(car)) self.play(ShowCreation(window, run_time = 2)) self.wait() #Show speedometer self.introduce_added_mobjects() self.play(ShowCreation(square)) self.wait() #Travel self.play(FadeIn(time_label)) self.play( MoveCar(car, self.point_B, rate_func = s_rate_func), IncrementNumber(time_label[1], run_time = 8), MaintainPositionRelativeTo(window, car), *self.get_added_movement_anims( rate_func = v_rate_func, radians = -(16.0/70)*4*np.pi/3 ), run_time = 8 ) eight = TexMobject("8").move_to(time_label[1]) self.play(Transform( time_label[1], eight, rate_func = squish_rate_func(smooth, 0, 0.5) )) self.wait() #Ask about distance self.play(*list(map(ShowCreation, dots))) self.play(ShowCreation(line)) self.play( GrowFromCenter(brace), Write(brace_text) ) self.wait(2) class GraphDistanceVsTime(GraphCarTrajectory): CONFIG = { "y_min" : 0, "y_max" : 100, "y_axis_height" : 6, "y_tick_frequency" : 10, "y_labeled_nums" : list(range(10, 100, 10)), "y_axis_label" : "Distance (in meters)", "x_min" : -1, "x_max" : 9, "x_axis_width" : 9, "x_tick_frequency" : 1, "x_leftmost_tick" : None, #Change if different from x_min "x_labeled_nums" : list(range(1, 9)), "x_axis_label" : "$t$", "time_of_journey" : 8, "care_movement_rate_func" : s_rate_func, "num_graph_anchor_points" : 100 } def construct(self): self.setup_axes() graph = self.get_graph( s_func, color = DISTANCE_COLOR, x_min = 0, x_max = 8, ) origin = self.coords_to_point(0, 0) graph_label = self.get_graph_label( graph, "s(t)", color = DISTANCE_COLOR ) self.introduce_graph(graph, origin) class PlotVelocity(GraphScene): CONFIG = { "x_min" : -1, "x_max" : 9, "x_axis_width" : 9, "x_tick_frequency" : 1, "x_labeled_nums" : list(range(1, 9)), "x_axis_label" : "$t$", "y_min" : 0, "y_max" : 25, "y_axis_height" : 6, "y_tick_frequency" : 5, "y_labeled_nums" : list(range(5, 30, 5)), "y_axis_label" : "Velocity in 
$\\frac{\\text{meters}}{\\text{second}}$", "num_graph_anchor_points" : 50, } def construct(self): self.setup_axes() self.add_speedometer() self.plot_points() self.draw_curve() def add_speedometer(self): speedometer = Speedometer() speedometer.next_to(self.y_axis_label_mob, RIGHT, LARGE_BUFF) speedometer.to_edge(UP) self.play(DrawBorderThenFill( speedometer, lag_ratio = 0.5, rate_func=linear, )) self.speedometer = speedometer def plot_points(self): times = list(range(0, 9)) points = [ self.coords_to_point(t, v_func(t)) for t in times ] dots = VGroup(*[Dot(p, radius = 0.07) for p in points]) dots.set_color(VELOCITY_COLOR) pre_dots = VGroup() dot_intro_anims = [] for time, dot in zip(times, dots): pre_dot = dot.copy() self.speedometer.move_needle_to_velocity(v_func(time)) pre_dot.move_to(self.speedometer.get_needle_tip()) pre_dot.set_fill(opacity = 0) pre_dots.add(pre_dot) dot_intro_anims += [ ApplyMethod( pre_dot.set_fill, YELLOW, 1, run_time = 0.1, ), ReplacementTransform( pre_dot, dot, run_time = 0.9, ) ] self.speedometer.move_needle_to_velocity(0) self.play( Succession( *dot_intro_anims, rate_func=linear ), ApplyMethod( self.speedometer.move_needle_to_velocity, v_func(4), rate_func = squish_rate_func( lambda t : 1-v_rate_func(t), 0, 0.95, ) ), run_time = 5 ) self.wait() def draw_curve(self): graph, label = self.get_v_graph_and_label() self.revert_to_original_skipping_status() self.play(ShowCreation(graph, run_time = 3)) self.play(Write(graph_label)) self.wait() ## def get_v_graph_and_label(self): graph = self.get_graph( v_func, x_min = 0, x_max = 8, color = VELOCITY_COLOR ) graph_label = TexMobject("v(t)", "=t(8-t)") graph_label.set_color_by_tex("v(t)", VELOCITY_COLOR) graph_label.next_to( graph.point_from_proportion(7./8.), UP+RIGHT ) self.v_graph = graph self.v_graph_label = graph_label return graph, graph_label class Chapter2Wrapper(Scene): CONFIG = { "title" : "Chapter 2: The paradox of the derivative", } def construct(self): title = TextMobject(self.title) 
title.to_edge(UP) rect = Rectangle(width = 16, height = 9, color = WHITE) rect.set_height(1.5*FRAME_Y_RADIUS) rect.next_to(title, DOWN) self.add(title) self.play(ShowCreation(rect)) self.wait(3) class GivenDistanceWhatIsVelocity(GraphCarTrajectory): def construct(self): self.force_skipping() self.setup_axes() graph = self.graph_sigmoid_trajectory_function() origin = self.coords_to_point(0, 0) self.introduce_graph(graph, origin) self.comment_on_slope(graph, origin) self.revert_to_original_skipping_status() self.show_velocity_graph() class DerivativeOfDistance(SecantLineToTangentLine): def construct(self): self.setup_axes() self.remove(self.y_axis_label_mob, self.x_axis_label_mob) self.add_derivative_definition(self.y_axis_label_mob) self.add_graph() self.draw_axes() self.show_tangent_line() class AskAboutAntiderivative(PlotVelocity): def construct(self): self.setup_axes() self.add_v_graph() self.write_s_formula() self.write_antiderivative() def add_v_graph(self): graph, label = self.get_v_graph_and_label() self.play(ShowCreation(graph)) self.play(Write(label)) self.graph = graph self.graph_label = label def write_s_formula(self): ds_dt = TexMobject("ds", "\\over\\,", "dt") ds_dt.set_color_by_tex("ds", DISTANCE_COLOR) ds_dt.set_color_by_tex("dt", TIME_COLOR) ds_dt.next_to(self.graph_label, UP, LARGE_BUFF) v_t = self.graph_label.get_part_by_tex("v(t)") arrow = Arrow( ds_dt.get_bottom(), v_t.get_top(), color = WHITE, ) self.play( Write(ds_dt, run_time = 2), ShowCreation(arrow) ) self.wait() def write_antiderivative(self): randy = Randolph() randy.to_corner(DOWN+LEFT) randy.shift(2*RIGHT) words = TexMobject( "{d(", "???", ") \\over \\,", "dt}", "=", "t(8-t)" ) words.set_color_by_tex("t(8-t)", VELOCITY_COLOR) words.set_color_by_tex("???", DISTANCE_COLOR) words.set_color_by_tex("dt", TIME_COLOR) words.scale(0.7) self.play(FadeIn(randy)) self.play(PiCreatureSays( randy, words, target_mode = "confused", bubble_kwargs = {"height" : 3, "width" : 4}, )) self.play(Blink(randy)) 
self.wait() class Antiderivative(PiCreatureScene): def construct(self): functions = self.get_functions("t^2", "2t") alt_functions = self.get_functions("???", "t(8-t)") top_arc, bottom_arc = arcs = self.get_arcs(functions) derivative, antiderivative = self.get_arc_labels(arcs) group = VGroup(functions, arcs, derivative, antiderivative) self.add(functions, top_arc, derivative) self.wait() self.play( ShowCreation(bottom_arc), Write(antiderivative), self.pi_creature.change_mode, "raise_right_hand" ) self.wait(2) for pair in reversed(list(zip(functions, alt_functions))): self.play( Transform(*pair), self.pi_creature.change_mode, "pondering" ) self.wait(2) self.pi_creature_says( "But first!", target_mode = "surprised", look_at_arg = 50*OUT, added_anims = [group.to_edge, LEFT], run_time = 1, ) self.wait() def get_functions(self, left_tex, right_tex): left = TexMobject(left_tex) left.shift(2*LEFT) left.set_color(DISTANCE_COLOR) right = TexMobject(right_tex) right.shift(2*RIGHT) right.set_color(VELOCITY_COLOR) result = VGroup(left, right) result.shift(UP) return result def get_arcs(self, functions): f1, f2 = functions top_line = Line(f1.get_corner(UP+RIGHT), f2.get_corner(UP+LEFT)) bottom_line = Line(f1.get_corner(DOWN+RIGHT), f2.get_corner(DOWN+LEFT)) top_arc = Arc(start_angle = 5*np.pi/6, angle = -2*np.pi/3) bottom_arc = top_arc.copy() bottom_arc.rotate(np.pi) arcs = VGroup(top_arc, bottom_arc) arcs.set_width(top_line.get_width()) for arc in arcs: arc.add_tip() top_arc.next_to(top_line, UP) bottom_arc.next_to(bottom_line, DOWN) bottom_arc.set_color(MAROON_B) return arcs def get_arc_labels(self, arcs): top_arc, bottom_arc = arcs derivative = TextMobject("Derivative") derivative.next_to(top_arc, UP) antiderivative = TextMobject("``Antiderivative''") antiderivative.next_to(bottom_arc, DOWN) antiderivative.set_color(bottom_arc.get_color()) return VGroup(derivative, antiderivative) class AreaUnderVGraph(PlotVelocity): def construct(self): self.setup_axes() 
self.add(*self.get_v_graph_and_label()) self.show_rects() def show_rects(self): rect_list = self.get_riemann_rectangles_list( self.v_graph, 7, max_dx = 1.0, x_min = 0, x_max = 8, ) flat_graph = self.get_graph(lambda t : 0) rects = self.get_riemann_rectangles( flat_graph, x_min = 0, x_max = 8, dx = 1.0 ) for new_rects in rect_list: new_rects.set_fill(opacity = 0.8) rects.align_submobjects(new_rects) for alt_rect in rects[::2]: alt_rect.set_fill(opacity = 0) self.play(Transform( rects, new_rects, run_time = 2, lag_ratio = 0.5 )) self.wait() class ConstantVelocityCar(Scene): def construct(self): car = Car() car.move_to(5*LEFT + 3*DOWN) self.add(car) self.wait() self.play(MoveCar( car, 7*RIGHT+3*DOWN, run_time = 5, rate_func=linear, )) self.wait() class ConstantVelocityPlot(PlotVelocity): CONFIG = { "x_axis_label" : "Time", "units_of_area_color" : BLUE_E, } def construct(self): self.setup_axes() self.x_axis_label_mob.shift(DOWN) self.draw_graph() self.show_product() self.comment_on_area_wierdness() self.note_units() def draw_graph(self): graph = self.get_graph( lambda t : 10, x_min = 0, x_max = 8, color = VELOCITY_COLOR ) self.play(ShowCreation(graph, rate_func=linear, run_time = 3)) self.wait() self.graph = graph def show_product(self): rect = Rectangle( stroke_width = 0, fill_color = DISTANCE_COLOR, fill_opacity = 0.5 ) rect.replace( VGroup(self.graph, VectorizedPoint(self.graph_origin)), stretch = True ) right_brace = Brace(rect, RIGHT) top_brace = Brace(rect, UP) v_label = right_brace.get_text( "$10 \\frac{\\text{meters}}{\\text{second}}$", ) v_label.set_color(VELOCITY_COLOR) t_label = top_brace.get_text( "8 seconds" ) t_label.set_color(TIME_COLOR) s_label = TexMobject("10", "\\times", "8", "\\text{ meters}") s_label.set_color_by_tex("10", VELOCITY_COLOR) s_label.set_color_by_tex("8", TIME_COLOR) s_label.move_to(rect) self.play( GrowFromCenter(right_brace), Write(v_label), ) self.play( GrowFromCenter(top_brace), Write(t_label), ) self.play( FadeIn(rect), 
Write(s_label), Animation(self.graph) ) self.wait(2) self.area_rect = rect self.s_label = s_label def comment_on_area_wierdness(self): randy = Randolph() randy.to_corner(DOWN+LEFT) bubble = randy.get_bubble( "Distance \\\\ is area?", bubble_class = ThoughtBubble, height = 3, width = 4, fill_opacity = 1, ) bubble.content.scale_in_place(0.8) bubble.content.shift(SMALL_BUFF*UP) VGroup(bubble[-1], bubble.content).shift(1.5*LEFT) self.play(FadeIn(randy)) self.play(randy.change_mode, "pondering") self.play( self.area_rect.set_color, YELLOW, *list(map(Animation, self.get_mobjects())), rate_func = there_and_back ) self.play(Blink(randy)) self.play( randy.change_mode, "confused", randy.look_at, randy.bubble, ShowCreation(bubble), Write(bubble.content), ) self.wait() self.play(Blink(randy)) self.wait() self.play( randy.change_mode, "pondering", FadeOut(bubble), FadeOut(bubble.content), ) self.randy = randy def note_units(self): x_line, y_line = lines = VGroup(*[ axis.copy() for axis in (self.x_axis, self.y_axis) ]) lines.set_color(TIME_COLOR) square = Square( stroke_color = BLACK, stroke_width = 1, fill_color = self.units_of_area_color, fill_opacity = 1, ) square.replace( VGroup(*[ VectorizedPoint(self.coords_to_point(i, i)) for i in (0, 1) ]), stretch = True ) units_of_area = VGroup(*[ square.copy().move_to( self.coords_to_point(x, y), DOWN+LEFT ) for x in range(8) for y in range(10) ]) self.play(ShowCreation(x_line)) self.play(Indicate(self.x_axis_label_mob)) self.play(FadeOut(x_line)) self.play( ShowCreation(y_line), self.randy.look_at, self.y_axis_label_mob ) self.play(Indicate(self.y_axis_label_mob)) self.play(FadeOut(y_line)) for FadeClass in FadeIn, FadeOut: self.play( FadeClass( units_of_area, lag_ratio = 0.5, run_time = 3 ), Animation(self.s_label), self.randy.look_at, self.area_rect ) self.play(Blink(self.randy)) self.wait() class PiecewiseConstantCar(Scene): def construct(self): car = Car() start_point = 5*LEFT car.move_to(start_point) self.add(car) self.wait() 
for shift in 2, 6, 12: car.randy.rotate_in_place(np.pi/8) anim = MoveCar( car, start_point+shift*RIGHT, rate_func=linear ) anim.target_mobject[0].rotate_in_place(-np.pi/8) # for mob in anim.starting_mobject, anim.mobject: # mob.randy.rotate_in_place(np.pi/6) self.play(anim) self.wait() class PiecewiseConstantPlot(PlotVelocity): CONFIG = { "y_axis_label" : "", "min_graph_proportion" : 0.1, "max_graph_proportion" : 0.8, "num_riemann_approximations" : 7, "riemann_rect_fill_opacity" : 0.75, "tick_size" : 0.2, } def construct(self): self.setup_graph() self.always_changing() self.show_piecewise_constant_graph() self.compute_distance_on_each_interval() self.approximate_original_curve() self.revert_to_specific_approximation() self.show_specific_rectangle() self.show_v_dt_for_all_rectangles() self.write_integral_symbol() self.roles_of_dt() self.what_does_sum_approach() self.label_integral() def setup_graph(self): self.setup_axes() self.add(*self.get_v_graph_and_label()) def always_changing(self): dot = Dot() arrow = Arrow(LEFT, RIGHT) words = TextMobject("Always changing") group = VGroup(dot, arrow, words) def update_group(group, alpha): dot, arrow, words = group prop = interpolate( self.min_graph_proportion, self.max_graph_proportion, alpha ) graph_point = self.v_graph.point_from_proportion(prop) dot.move_to(graph_point) x_val = self.x_axis.point_to_number(graph_point) angle = self.angle_of_tangent(x_val, self.v_graph) angle += np.pi/2 vect = rotate_vector(RIGHT, angle) arrow.rotate(angle - arrow.get_angle() + np.pi) arrow.shift( graph_point + MED_SMALL_BUFF*vect - arrow.get_end() ) words.next_to(arrow.get_start(), UP) return group update_group(group, 0) self.play( Write(words), ShowCreation(arrow), DrawBorderThenFill(dot), run_time = 1 ) self.play(UpdateFromAlphaFunc( group, update_group, rate_func = there_and_back, run_time = 5 )) self.wait() self.play(FadeOut(group)) def show_piecewise_constant_graph(self): pw_constant_graph = self.get_pw_constant_graph() alt_lines = [ 
line.copy().set_color(YELLOW) for line in pw_constant_graph[:4] ] for line in alt_lines: line.start_dot = Dot(line.get_start()) line.end_dot = Dot(line.get_end()) VGroup(line.start_dot, line.end_dot).set_color(line.get_color()) line = alt_lines[0] faders = [self.v_graph, self.v_graph_label] for mob in faders: mob.save_state() mob.generate_target() mob.target.fade(0.7) self.play(*list(map(MoveToTarget, faders))) self.play(ShowCreation(pw_constant_graph, run_time = 2)) self.wait() self.play(ShowCreation(line)) self.wait() for new_line in alt_lines[1:]: for mob in line.end_dot, new_line.start_dot, new_line: self.play(Transform( line, mob, run_time = 1./3 )) self.remove(line) self.add(new_line) self.wait(2) line = new_line self.play(FadeOut(line)) self.pw_constant_graph = pw_constant_graph def compute_distance_on_each_interval(self): rect_list = self.get_riemann_rectangles_list( self.v_graph, self.num_riemann_approximations, max_dx = 1, x_min = 0, x_max = 8, ) for rects in rect_list: rects.set_fill(opacity = self.riemann_rect_fill_opacity) flat_rects = self.get_riemann_rectangles( self.get_graph(lambda t : 0), x_min = 0, x_max = 8, dx = 1 ) rects = rect_list[0] rect = rects[1] flat_rects.submobjects[1] = rect.copy() right_brace = Brace(rect, RIGHT) top_brace = Brace(rect, UP) right_brace.label = right_brace.get_text("$7\\frac{\\text{m}}{\\text{s}}$") top_brace.label = top_brace.get_text("$1$s") self.play(FadeIn(rect)) for brace in right_brace, top_brace: self.play( GrowFromCenter(brace), Write(brace.label, run_time = 1), ) brace.add(brace.label) self.wait() self.play( ReplacementTransform( flat_rects, rects, run_time = 2, lag_ratio = 0.5, ), Animation(right_brace) ) self.play(*list(map(FadeOut, [top_brace, right_brace]))) self.wait() self.rects = rects self.rect_list = rect_list def approximate_original_curve(self): rects = self.rects self.play( FadeOut(self.pw_constant_graph), *[ m.restore for m in (self.v_graph, self.v_graph_label) ]+[Animation(self.rects)] ) for 
new_rects in self.rect_list[1:]: self.transform_between_riemann_rects(rects, new_rects) self.wait() def revert_to_specific_approximation(self): rects = self.rects rects.save_state() target_rects = self.rect_list[2] target_rects.set_fill(opacity = 1) ticks = self.get_ticks(target_rects) tick_pair = VGroup(*ticks[4:6]) brace = Brace(tick_pair, DOWN, buff = 0) dt_label = brace.get_text("$dt$", buff = SMALL_BUFF) example_text = TextMobject( "For example, \\\\", "$dt$", "$=0.25$" ) example_text.to_corner(UP+RIGHT) example_text.set_color_by_tex("dt", YELLOW) self.play(ReplacementTransform( rects, target_rects, run_time = 2, lag_ratio = 0.5 )) rects.restore() self.wait() self.play( ShowCreation(ticks), FadeOut(self.x_axis.numbers) ) self.play( GrowFromCenter(brace), Write(dt_label) ) self.wait() self.play( FadeIn( example_text, run_time = 2, lag_ratio = 0.5, ), ReplacementTransform( dt_label.copy(), example_text.get_part_by_tex("dt") ) ) self.wait() self.rects = rects = target_rects self.ticks = ticks self.dt_brace = brace self.dt_label = dt_label self.dt_example_text = example_text def show_specific_rectangle(self): rects = self.rects rect = rects[4].copy() rect_top = Line( rect.get_corner(UP+LEFT), rect.get_corner(UP+RIGHT), color = self.v_graph.get_color() ) t_vals = [1, 1.25] t_labels = VGroup(*[ TexMobject("t=%s"%str(t)) for t in t_vals ]) t_labels.scale(0.7) t_labels.next_to(rect, DOWN) for vect, label in zip([LEFT, RIGHT], t_labels): label.shift(1.5*vect) label.add(Arrow( label.get_edge_center(-vect), rect.get_corner(DOWN+vect), buff = SMALL_BUFF, tip_length = 0.15, color = WHITE )) v_lines = VGroup() h_lines = VGroup() height_labels = VGroup() for t in t_vals: v_line = self.get_vertical_line_to_graph( t, self.v_graph, color = YELLOW ) y_axis_point = self.graph_origin[0]*RIGHT y_axis_point += v_line.get_end()[1]*UP h_line = DashedLine(v_line.get_end(), y_axis_point) label = TexMobject("%.1f"%v_func(t)) label.scale(0.5) label.next_to(h_line, LEFT, SMALL_BUFF) 
v_lines.add(v_line) h_lines.add(h_line) height_labels.add(label) circle = Circle(radius = 0.25, color = WHITE) circle.move_to(rect.get_top()) self.play( rects.set_fill, None, 0.25, Animation(rect) ) self.wait() for label in t_labels: self.play(FadeIn(label)) self.wait() for v_line, h_line, label in zip(v_lines, h_lines, height_labels): self.play(ShowCreation(v_line)) self.play(ShowCreation(h_line)) self.play(Write(label, run_time = 1)) self.wait() self.wait() t_label_copy = t_labels[0].copy() self.play( t_label_copy.scale, 1./0.7, t_label_copy.next_to, self.v_graph_label, DOWN+LEFT, 0 ) self.wait() self.play(FadeOut(t_label_copy)) self.wait() self.play(ShowCreation(circle)) self.play(ShowCreation(rect_top)) self.play(FadeOut(circle)) rect.add(rect_top) self.wait() for x in range(2): self.play( rect.stretch_to_fit_height, v_lines[1].get_height(), rect.move_to, rect.get_bottom(), DOWN, Animation(v_lines), run_time = 4, rate_func = there_and_back ) self.play(*list(map(FadeOut, [ group[1] for group in (v_lines, h_lines, height_labels) ]))) self.play( v_lines[0].set_color, RED, rate_func = there_and_back, ) self.wait() area = TextMobject( "7$\\frac{\\text{m}}{\\text{s}}$", "$\\times$", "0.25s", "=", "1.75m" ) area.next_to(rect, RIGHT, LARGE_BUFF) arrow = Arrow( area.get_left(), rect.get_center(), buff = 0, color = WHITE ) area.shift(SMALL_BUFF*RIGHT) self.play( Write(area), ShowCreation(arrow) ) self.wait(2) self.play(*list(map(FadeOut, [ area, arrow, v_lines[0], h_lines[0], height_labels[0], rect, t_labels ]))) def show_v_dt_for_all_rectangles(self): dt_brace_group = VGroup(self.dt_brace, self.dt_label) rects_subset = self.rects[10:20] last_rect = None for rect in rects_subset: brace = Brace(rect, LEFT, buff = 0) v_t = TexMobject("v(t)") v_t.next_to(brace, LEFT, SMALL_BUFF) anims = [ rect.set_fill, None, 1, dt_brace_group.next_to, rect, DOWN, SMALL_BUFF ] if last_rect is not None: anims += [ last_rect.set_fill, None, 0.25, ReplacementTransform(last_brace, brace), 
ReplacementTransform(last_v_t, v_t), ] else: anims += [ GrowFromCenter(brace), Write(v_t) ] self.play(*anims) self.wait() last_rect = rect last_brace = brace last_v_t = v_t self.v_t = last_v_t self.v_t_brace = last_brace def write_integral_symbol(self): integral = TexMobject( "\\int", "^8", "_0", "v(t)", "\\,dt" ) integral.to_corner(UP+RIGHT) int_copy = integral.get_part_by_tex("int").copy() bounds = list(map(integral.get_part_by_tex, ["0", "8"])) sum_word = TextMobject("``Sum''") sum_word.next_to(integral, DOWN, MED_LARGE_BUFF, LEFT) alt_sum_word = sum_word.copy() int_symbol = TexMobject("\\int") int_symbol.replace(alt_sum_word[1], dim_to_match = 1) alt_sum_word.submobjects[1] = int_symbol self.play(FadeOut(self.dt_example_text)) self.play(Write(integral.get_part_by_tex("int"))) self.wait() self.play(Transform(int_copy, int_symbol)) self.play(Write(alt_sum_word), Animation(int_copy)) self.remove(int_copy) self.play(ReplacementTransform(alt_sum_word, sum_word)) self.wait() for bound in bounds: self.play(Write(bound)) self.wait() for bound, num in zip(bounds, [0, 8]): bound_copy = bound.copy() point = self.coords_to_point(num, 0) self.play( bound_copy.scale, 1.5, bound_copy.next_to, point, DOWN, MED_LARGE_BUFF ) self.play(ApplyWave(self.ticks, direction = UP)) self.wait() for mob, tex in (self.v_t, "v(t)"), (self.dt_label, "dt"): self.play(ReplacementTransform( mob.copy().set_color(YELLOW), integral.get_part_by_tex(tex), run_time = 2 )) self.wait() self.integral = integral self.sum_word = sum_word def roles_of_dt(self): rects = self.rects next_rects = self.rect_list[3] morty = Mortimer().flip() morty.to_corner(DOWN+LEFT) int_dt = self.integral.get_part_by_tex("dt") dt_copy = int_dt.copy() self.play(FadeIn(morty)) self.play( morty.change_mode, "raise_right_hand", morty.look, UP+RIGHT, dt_copy.next_to, morty.get_corner(UP+RIGHT), UP, dt_copy.set_color, YELLOW ) self.play(Blink(morty)) self.play( ReplacementTransform( dt_copy.copy(), int_dt, run_time = 2 ), 
morty.look_at, int_dt ) self.wait(2) self.play( ReplacementTransform(dt_copy.copy(), self.dt_label), morty.look_at, self.dt_label ) self.play(*[ ApplyMethod( tick.shift, tick.get_height()*UP/2, run_time = 2, rate_func = squish_rate_func( there_and_back, alpha, alpha+0.2, ) ) for tick, alpha in zip( self.ticks, np.linspace(0, 0.8, len(self.ticks)) ) ]) self.wait() #Shrink dt just a bit self.play( morty.change_mode, "pondering", rects.set_fill, None, 0.75, *list(map(FadeOut, [ dt_copy, self.v_t, self.v_t_brace ])) ) rects.align_submobjects(next_rects) for every_other_rect in rects[::2]: every_other_rect.set_fill(opacity = 0) self.play( self.dt_brace.stretch, 0.5, 0, self.dt_brace.move_to, self.dt_brace, LEFT, ReplacementTransform( rects, next_rects, run_time = 2, lag_ratio = 0.5 ), Transform( self.ticks, self.get_ticks(next_rects), run_time = 2, lag_ratio = 0.5, ), ) self.rects = rects = next_rects self.wait() self.play(Blink(morty)) self.play(*[ ApplyFunction( lambda r : r.shift(0.2*UP).set_fill(None, 1), rect, run_time = 2, rate_func = squish_rate_func( there_and_back, alpha, alpha+0.2, ) ) for rect, alpha in zip( rects, np.linspace(0, 0.8, len(rects)) ) ]+[ morty.change_mode, "thinking", ]) self.wait() self.morty = morty def what_does_sum_approach(self): morty = self.morty rects = self.rects cross = TexMobject("\\times") cross.replace(self.sum_word, stretch = True) cross.set_color(RED) brace = Brace(self.integral, DOWN) dt_to_0 = brace.get_text("$dt \\to 0$") distance_words = TextMobject( "Area", "= Distance traveled" ) distance_words.next_to(rects, UP) arrow = Arrow( distance_words[0].get_bottom(), rects.get_center(), color = WHITE ) self.play(PiCreatureSays( morty, "Why not $\\Sigma$?", target_mode = "sassy" )) self.play(Blink(morty)) self.wait() self.play(Write(cross)) self.wait() self.play( RemovePiCreatureBubble(morty, target_mode = "plain"), *list(map(FadeOut, [ cross, self.sum_word, self.ticks, self.dt_brace, self.dt_label, ])) ) self.play(FadeIn(brace), 
FadeIn(dt_to_0)) for new_rects in self.rect_list[4:]: rects.align_submobjects(new_rects) for every_other_rect in rects[::2]: every_other_rect.set_fill(opacity = 0) self.play( Transform( rects, new_rects, run_time = 2, lag_ratio = 0.5 ), morty.look_at, rects, ) self.wait() self.play( Write(distance_words), ShowCreation(arrow), morty.change_mode, "pondering", morty.look_at, distance_words, ) self.wait() self.play(Blink(morty)) self.wait() self.area_arrow = arrow def label_integral(self): words = TextMobject("``Integral of $v(t)$''") words.to_edge(UP) arrow = Arrow( words.get_right(), self.integral.get_left() ) self.play(Indicate(self.integral)) self.play(Write(words, run_time = 2)) self.play(ShowCreation(arrow)) self.wait() self.play(*[ ApplyFunction( lambda r : r.shift(0.2*UP).set_fill(None, 1), rect, run_time = 3, rate_func = squish_rate_func( there_and_back, alpha, alpha+0.2, ) ) for rect, alpha in zip( self.rects, np.linspace(0, 0.8, len(self.rects)) ) ]+[ Animation(self.area_arrow), self.morty.change_mode, "happy", self.morty.look_at, self.rects, ]) self.wait() ##### def get_pw_constant_graph(self): result = VGroup() for left_x in range(8): xs = [left_x, left_x+1] y = self.v_graph.underlying_function(left_x) line = Line(*[ self.coords_to_point(x, y) for x in xs ]) line.set_color(self.v_graph.get_color()) result.add(line) return result def get_ticks(self, rects): ticks = VGroup(*[ Line( point+self.tick_size*UP/2, point+self.tick_size*DOWN/2 ) for t in np.linspace(0, 8, len(rects)+1) for point in [self.coords_to_point(t, 0)] ]) ticks.set_color(YELLOW) return ticks class DontKnowHowToHandleNonConstant(TeacherStudentsScene): def construct(self): self.play(*[ ApplyMethod(pi.change, "maybe", UP) for pi in self.get_pi_creatures() ]) self.wait(3) class CarJourneyApproximation(Scene): CONFIG = { "n_jumps" : 5, "bottom_words" : "Approximated motion (5 jumps)", } def construct(self): points = [5*LEFT + v for v in (UP, 2*DOWN)] cars = [Car().move_to(point) for point in 
points] h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS) words = [ TextMobject("Real motion (smooth)").shift(3*UP), TextMobject(self.bottom_words).shift(0.5*DOWN), ] words[1].set_color(GREEN) self.add(h_line, *cars + words) self.wait() self.play(*[ MoveCar( car, point+10*RIGHT, run_time = 5, rate_func = rf ) for car, point, rf in zip(cars, points, [ s_rate_func, self.get_approximated_rate_func(self.n_jumps) ]) ]) self.wait() def get_approximated_rate_func(self, n): new_v_rate_func = lambda t : v_rate_func(np.floor(t*n)/n) max_integral, err = scipy.integrate.quad( v_rate_func, 0, 1 ) def result(t): integral, err = scipy.integrate.quad(new_v_rate_func, 0, t) return integral/max_integral return result class LessWrongCarJourneyApproximation(CarJourneyApproximation): CONFIG = { "n_jumps" : 20, "bottom_words" : "Better approximation (20 jumps)", } class TellMeThatsNotSurprising(TeacherStudentsScene): def construct(self): self.teacher_says( "Tell me that's \\\\ not surprising!", target_mode = "hooray", run_time = 1 ) self.wait(3) class HowDoesThisHelp(TeacherStudentsScene): def construct(self): self.student_says( "How does this help\\textinterrobang", target_mode = "angry", run_time = 1 ) self.change_student_modes( "confused", "angry", "confused", ) self.wait(2) self.teacher_says( "You're right.", target_mode = "shruggie", run_time = 1 ) self.change_student_modes(*["sassy"]*3) self.wait(2) class AreaUnderACurve(GraphScene): CONFIG = { "y_max" : 4, "y_min" : 0, "num_iterations" : 7 } def construct(self): self.setup_axes() graph = self.get_graph(self.func) rect_list = self.get_riemann_rectangles_list( graph, self.num_iterations ) VGroup(*rect_list).set_fill(opacity = 0.8) rects = rect_list[0] self.play(ShowCreation(graph)) self.play(Write(rects)) for new_rects in rect_list[1:]: rects.align_submobjects(new_rects) for every_other_rect in rects[::2]: every_other_rect.set_fill(opacity = 0) self.play(Transform( rects, new_rects, run_time = 2, lag_ratio = 0.5 )) self.wait() def 
func(self, x): return np.sin(x) + 1 class AltAreaUnderCurve(AreaUnderACurve): CONFIG = { "graph_origin" : 2*DOWN, "x_min" : -3, "x_max" : 3, "x_axis_width" : 12, "y_max" : 2, "y_axis_height" : 4, } def func(self, x): return np.exp(-x**2) class Chapter1Wrapper(Chapter2Wrapper): CONFIG = { "title" : "Essence of calculus, chapter 1", } class AreaIsDerivative(PlotVelocity, ReconfigurableScene): CONFIG = { "y_axis_label" : "", "num_rects" : 400, "dT" : 0.25, "variable_point_label" : "T", "area_opacity" : 0.8, } def setup(self): PlotVelocity.setup(self) ReconfigurableScene.setup(self) self.setup_axes() self.add(*self.get_v_graph_and_label()) self.x_axis_label_mob.shift(MED_LARGE_BUFF*DOWN) self.v_graph_label.shift(MED_LARGE_BUFF*DOWN) self.submobjects = [] def construct(self): self.introduce_variable_area() self.write_integral() self.nudge_input() self.show_rectangle_approximation() def introduce_variable_area(self): area = self.area = self.get_area(0, 6) x_nums = self.x_axis.numbers self.play(Write(area, run_time = 2)) self.play(FadeOut(self.x_axis.numbers)) self.add_T_label(6) self.change_area_bounds( new_t_max = 4, rate_func = there_and_back, run_time = 2 ) self.wait() def write_integral(self): integral = TexMobject("\\int", "^T", "_0", "v(t)", "\\,dt") integral.to_corner(UP+RIGHT) integral.shift(2*LEFT) top_T = integral.get_part_by_tex("T") moving_T = self.T_label_group[0] s_T = TexMobject("s(T)", "= ") s_T.set_color_by_tex("s", DISTANCE_COLOR) s_T.next_to(integral, LEFT) int_arrow, s_arrow = [ Arrow( mob.get_left(), self.area.get_center(), color = WHITE ) for mob in (integral, s_T) ] distance_word = TextMobject("Distance") distance_word.move_to(self.area) self.play(Write(integral)) self.play(ShowCreation(int_arrow)) self.submobjects.append(int_arrow) self.wait() self.change_area_bounds( new_t_max = 8, rate_func = there_and_back, run_time = 3, ) self.play(Indicate(top_T)) self.play(ReplacementTransform( top_T.copy(), moving_T )) self.change_area_bounds( new_t_max = 
3, rate_func = there_and_back, run_time = 3 ) self.wait() self.play(Write(distance_word, run_time = 2)) self.play( ReplacementTransform(int_arrow, s_arrow), FadeIn(s_T) ) self.wait() self.play(FadeOut(distance_word)) self.change_area_bounds(new_t_max = 0, run_time = 2) self.change_area_bounds( new_t_max = 8, rate_func=linear, run_time = 7.9, ) self.wait() self.change_area_bounds(new_t_max = 5) self.wait() def nudge_input(self): dark_area = self.area.copy() dark_area.set_fill(BLACK, opacity = 0.5) curr_T = self.x_axis.point_to_number(self.area.get_right()) new_T = curr_T + self.dT rect = Rectangle( stroke_width = 0, fill_color = YELLOW, fill_opacity = 0.75 ) rect.replace( VGroup( VectorizedPoint(self.coords_to_point(new_T, 0)), self.right_v_line, ), stretch = True ) dT_brace = Brace(rect, DOWN, buff = 0) dT_label = dT_brace.get_text("$dT$", buff = SMALL_BUFF) dT_label_group = VGroup(dT_label, dT_brace) ds_label = TexMobject("ds") ds_label.next_to(rect, RIGHT, LARGE_BUFF, UP) ds_label.set_color(DISTANCE_COLOR) ds_arrow = Arrow(ds_label.get_left(), rect.get_left()) ds_arrow.set_color(WHITE) v_brace = Brace(rect, LEFT, buff = SMALL_BUFF) v_T_label = v_brace.get_text("$v(T)$", buff = SMALL_BUFF) self.change_area_bounds(new_t_max = new_T) self.play( FadeIn(dark_area), *list(map(Animation, self.submobjects)) ) self.play( FadeOut(self.T_label_group), FadeIn(dT_label_group) ) self.wait() self.play(Write(ds_label)) self.play(ShowCreation(ds_arrow)) self.wait(2) self.play(GrowFromCenter(v_brace)) self.play(ReplacementTransform( self.v_graph_label.get_part_by_tex("v").copy(), v_T_label, run_time = 2 )) self.wait() self.play(Indicate(dT_label)) self.wait() self.rect = rect self.dT_label_group = dT_label_group self.v_T_label_group = VGroup(v_T_label, v_brace) self.dark_area = dark_area self.ds_label = ds_label self.ds_arrow = ds_arrow def show_rectangle_approximation(self): formula1 = TexMobject("ds", "=", "v(T)", "dT") formula2 = TexMobject("{ds", "\\over\\,", "dT}", "=", 
"v(T)") for formula in formula1, formula2: formula.next_to(self.v_graph_label, UP, LARGE_BUFF) formula.set_color_by_tex("ds", DISTANCE_COLOR) self.play( DrawBorderThenFill(self.rect), Animation(self.ds_arrow) ) self.wait() self.play(*[ ReplacementTransform( mob, formula1.get_part_by_tex(tex), run_time = 2 ) for mob, tex in [ (self.ds_label, "ds"), (self.ds_arrow, "="), (self.v_T_label_group[0].copy(), "v(T)"), (self.dT_label_group[0].copy(), "dT"), ] ]) self.wait() self.transition_to_alt_config( dT = self.dT/5.0, transformation_kwargs = {"run_time" : 2}, ) self.wait() self.play(*[ ReplacementTransform( formula1.get_part_by_tex(tex), formula2.get_part_by_tex(tex), ) for tex in ("ds", "=", "v(T)", "dT") ] + [ Write(formula2.get_part_by_tex("over")) ]) self.wait() #### def add_T_label(self, x_val, **kwargs): triangle = RegularPolygon(n=3, start_angle = np.pi/2) triangle.set_height(MED_SMALL_BUFF) triangle.move_to(self.coords_to_point(x_val, 0), UP) triangle.set_fill(WHITE, 1) triangle.set_stroke(width = 0) T_label = TexMobject(self.variable_point_label) T_label.next_to(triangle, DOWN) v_line = self.get_vertical_line_to_graph( x_val, self.v_graph, color = YELLOW ) self.play( DrawBorderThenFill(triangle), ShowCreation(v_line), Write(T_label, run_time = 1), **kwargs ) self.T_label_group = VGroup(T_label, triangle) self.right_v_line = v_line def get_area(self, t_min, t_max): numerator = max(t_max - t_min, 0.01) dx = float(numerator) / self.num_rects return self.get_riemann_rectangles( self.v_graph, x_min = t_min, x_max = t_max, dx = dx, stroke_width = 0, ).set_fill(opacity = self.area_opacity) def change_area_bounds(self, new_t_min = None, new_t_max = None, **kwargs): curr_t_min = self.x_axis.point_to_number(self.area.get_left()) curr_t_max = self.x_axis.point_to_number(self.area.get_right()) if new_t_min is None: new_t_min = curr_t_min if new_t_max is None: new_t_max = curr_t_max group = VGroup(self.area, self.right_v_line, self.T_label_group) def update_group(group, 
alpha): area, v_line, T_label = group t_min = interpolate(curr_t_min, new_t_min, alpha) t_max = interpolate(curr_t_max, new_t_max, alpha) new_area = self.get_area(t_min, t_max) new_v_line = self.get_vertical_line_to_graph( t_max, self.v_graph ) new_v_line.set_color(v_line.get_color()) T_label.move_to(new_v_line.get_bottom(), UP) #Fade close to 0 T_label[0].set_fill(opacity = min(1, t_max)) Transform(area, new_area).update(1) Transform(v_line, new_v_line).update(1) return group self.play( UpdateFromAlphaFunc(group, update_group), *list(map(Animation, self.submobjects)), **kwargs ) class DirectInterpretationOfDsDt(TeacherStudentsScene): def construct(self): equation = TexMobject("{ds", "\\over\\,", "dT}", "(T)", "=", "v(T)") ds, over, dt, of_T, equals, v = equation equation.next_to(self.get_pi_creatures(), UP, LARGE_BUFF) equation.shift(RIGHT) v.set_color(VELOCITY_COLOR) s_words = TextMobject("Tiny change in", "distance") s_words.next_to(ds, UP+LEFT, LARGE_BUFF) s_words.shift_onto_screen() s_arrow = Arrow(s_words[1].get_bottom(), ds.get_left()) s_words.add(s_arrow) s_words.set_color(DISTANCE_COLOR) t_words = TextMobject("Tiny change in", "time") t_words.next_to(dt, DOWN+LEFT) t_words.to_edge(LEFT) t_arrow = Arrow(t_words[1].get_top(), dt.get_left()) t_words.add(t_arrow) t_words.set_color(TIME_COLOR) self.add(ds, over, dt, of_T) for words, part in (s_words, ds), (t_words, dt): self.play( FadeIn( words, run_time = 2, lag_ratio = 0.5, ), self.students[1].change_mode, "raise_right_hand" ) self.play(part.set_color, words.get_color()) self.wait() self.play(Write(VGroup(equals, v))) self.change_student_modes(*["pondering"]*3) self.wait(3) class FindAntiderivative(Antiderivative): def construct(self): self.introduce() self.first_part() self.second_part() self.combine() self.add_plus_C() def introduce(self): q_marks, rhs = functions = self.get_functions("???", "t(8-t)") expanded_rhs = TexMobject("8t - t^2") expanded_rhs.move_to(rhs, LEFT) 
expanded_rhs.set_color(rhs.get_color()) self.v_part1 = VGroup(*expanded_rhs[:2]) self.v_part2 = VGroup(*expanded_rhs[2:]) for part in self.v_part1, self.v_part2: part.save_state() top_arc, bottom_arc = arcs = self.get_arcs(functions) derivative, antiderivative = words = self.get_arc_labels(arcs) self.add(functions) self.play(*list(map(ShowCreation, arcs))) for word in words: self.play(FadeIn(word, lag_ratio = 0.5)) self.wait() self.change_mode("confused") self.wait(2) self.play(*[ ReplacementTransform( rhs[i], expanded_rhs[j], run_time = 2, path_arc = np.pi ) for i, j in enumerate([1, 4, 0, 2, 3, 4]) ]+[ self.pi_creature.change_mode, "hesitant" ]) self.wait() self.q_marks = q_marks self.arcs = arcs self.words = words def first_part(self): four_t_squared, two_t = self.get_functions("4t^2", "2t") four = four_t_squared[0] four.shift(UP) four.set_fill(opacity = 0) t_squared = VGroup(*four_t_squared[1:]) two_t.move_to(self.v_part1, LEFT) self.play(self.v_part2.to_corner, UP+RIGHT) self.play( self.pi_creature.change, "plain", self.v_part1 ) self.play(ApplyWave( self.q_marks, direction = UP, amplitude = SMALL_BUFF )) self.wait(2) self.play( FadeOut(self.q_marks), FadeIn(t_squared), self.v_part1.shift, DOWN+RIGHT, ) self.play(*[ ReplacementTransform( t_squared[i].copy(), two_t[1-i], run_time = 2, path_arc = -np.pi/6. 
) for i in (0, 1) ]) self.change_mode("thinking") self.wait() self.play(four.set_fill, YELLOW, 1) self.play(four.shift, DOWN) self.play(FadeOut(two_t)) self.play(self.v_part1.restore) self.play(four.set_color, DISTANCE_COLOR) self.wait(2) self.s_part1 = four_t_squared def second_part(self): self.arcs_copy = self.arcs.copy() self.words_copy = self.words.copy() part1_group = VGroup( self.s_part1, self.v_part1, self.arcs_copy, self.words_copy ) neg_third_t_cubed, three_t_squared = self.get_functions( "- \\frac{1}{3} t^3", "3t^2" ) three_t_squared.move_to(self.v_part1, LEFT) neg = neg_third_t_cubed[0] third = VGroup(*neg_third_t_cubed[1:4]) t_cubed = VGroup(*neg_third_t_cubed[4:]) three = three_t_squared[0] t_squared = VGroup(*three_t_squared[1:]) self.play( part1_group.scale, 0.5, part1_group.to_corner, UP+LEFT, self.pi_creature.change_mode, "plain" ) self.play( self.v_part2.restore, self.v_part2.shift, LEFT ) self.play(FadeIn(self.q_marks)) self.wait() self.play( FadeOut(self.q_marks), FadeIn(t_cubed), self.v_part2.shift, DOWN+RIGHT ) self.play(*[ ReplacementTransform( t_cubed[i].copy(), three_t_squared[j], path_arc = -np.pi/6, run_time = 2, ) for i, j in [(0, 1), (1, 0), (1, 2)] ]) self.wait() self.play(FadeIn(third)) self.play(FadeOut(three)) self.wait(2) self.play(Write(neg)) self.play( FadeOut(t_squared), self.v_part2.shift, UP+LEFT ) self.wait(2) self.s_part2 = neg_third_t_cubed def combine(self): self.play( self.v_part1.restore, self.v_part2.restore, self.s_part1.scale, 2, self.s_part1.next_to, self.s_part2, LEFT, FadeOut(self.arcs_copy), FadeOut(self.words_copy), run_time = 2, ) self.change_mode("happy") self.wait(2) def add_plus_C(self): s_group = VGroup(self.s_part1, self.s_part2) plus_Cs = [ TexMobject("+%d"%d) for d in range(1, 8) ] for plus_C in plus_Cs: plus_C.set_color(YELLOW) plus_C.move_to(s_group, RIGHT) plus_C = plus_Cs[0] self.change_mode("sassy") self.wait() self.play( s_group.next_to, plus_C.copy(), LEFT, GrowFromCenter(plus_C), ) self.wait() for 
new_plus_C in plus_Cs[1:]: self.play(Transform(plus_C, new_plus_C)) self.wait() class GraphSPlusC(GraphDistanceVsTime): CONFIG = { "y_axis_label" : "Distance" } def construct(self): self.setup_axes() graph = self.get_graph( s_func, color = DISTANCE_COLOR, x_min = 0, x_max = 8, ) tangent = self.get_secant_slope_group( 6, graph, dx = 0.01 ).secant_line v_line = self.get_vertical_line_to_graph( 6, graph, line_class = DashedLine ) v_line.scale_in_place(2) v_line.set_color(WHITE) graph_label, plus_C = full_label = TexMobject( "s(t) = 4t^2 - \\frac{1}{3}t^3", "+C" ) plus_C.set_color(YELLOW) full_label.next_to(graph.points[-1], DOWN) full_label.to_edge(RIGHT) self.play(ShowCreation(graph)) self.play(FadeIn(graph_label)) self.wait() self.play( graph.shift, UP, run_time = 2, rate_func = there_and_back ) self.play(ShowCreation(tangent)) graph.add(tangent) self.play(ShowCreation(v_line)) self.play( graph.shift, 2*DOWN, run_time = 4, rate_func = there_and_back, ) self.play(Write(plus_C)) self.play( graph.shift, 2*UP, rate_func = there_and_back, run_time = 4, ) self.wait() class LowerBound(AreaIsDerivative): CONFIG = { "graph_origin" : 2.5*DOWN + 6*LEFT } def construct(self): self.add_integral_and_area() self.mention_lower_bound() self.drag_right_endpoint_to_zero() self.write_antiderivative_difference() self.show_alternate_antiderivative_difference() self.add_constant_to_antiderivative() def add_integral_and_area(self): self.area = self.get_area(0, 6) self.integral = self.get_integral("0", "T") self.remove(self.x_axis.numbers) self.add(self.area, self.integral) self.add_T_label(6, run_time = 0) def mention_lower_bound(self): lower_bound = self.integral.get_part_by_tex("0") circle = Circle(color = YELLOW) circle.replace(lower_bound) circle.scale_in_place(3) zero_label = lower_bound.copy() self.play(ShowCreation(circle)) self.play(Indicate(lower_bound)) self.play( zero_label.scale, 1.5, zero_label.next_to, self.graph_origin, DOWN, MED_LARGE_BUFF, FadeOut(circle) ) self.wait() 
self.zero_label = zero_label def drag_right_endpoint_to_zero(self): zero_integral = self.get_integral("0", "0") zero_integral[1].set_color(YELLOW) zero_int_bounds = list(reversed( zero_integral.get_parts_by_tex("0") )) for bound in zero_int_bounds: circle = Circle(color = YELLOW) circle.replace(bound) circle.scale_in_place(3) bound.circle = circle self.integral.save_state() equals_zero = TexMobject("=0") equals_zero.next_to(zero_integral, RIGHT) equals_zero.set_color(GREEN) self.change_area_bounds(0, 0, run_time = 3) self.play(ReplacementTransform( self.zero_label.copy(), equals_zero )) self.play(Transform(self.integral, zero_integral)) self.wait(2) for bound in zero_int_bounds: self.play(ShowCreation(bound.circle)) self.play(FadeOut(bound.circle)) self.play(*[ ReplacementTransform( bound.copy(), VGroup(equals_zero[1]) ) for bound in zero_int_bounds ]) self.wait(2) self.change_area_bounds(0, 5) self.play( self.integral.restore, FadeOut(equals_zero) ) self.zero_integral = zero_integral def write_antiderivative_difference(self): antideriv_diff = self.get_antiderivative_difference("0", "T") equals, at_T, minus, at_zero = antideriv_diff antideriv_diff_at_eight = self.get_antiderivative_difference("0", "8") at_eight = antideriv_diff_at_eight.left_part integral_at_eight = self.get_integral("0", "8") for part in at_T, at_zero, at_eight: part.brace = Brace(part, DOWN, buff = SMALL_BUFF) part.brace.save_state() antideriv_text = at_T.brace.get_text("Antiderivative", buff = SMALL_BUFF) antideriv_text.set_color(MAROON_B) value_at_eight = at_eight.brace.get_text( "%.2f"%s_func(8) ) happens_to_be_zero = at_zero.brace.get_text(""" Happens to equal 0 """) big_brace = Brace(VGroup(at_T, at_zero)) cancel_text = big_brace.get_text("Cancels when $T=0$") self.play(*list(map(Write, [equals, at_T]))) self.play( GrowFromCenter(at_T.brace), Write(antideriv_text, run_time = 2) ) self.change_area_bounds(0, 5.5, rate_func = there_and_back) self.wait() self.play( 
ReplacementTransform(at_T.copy(), at_zero), Write(minus) ) self.wait() self.play( ReplacementTransform(at_T.brace, big_brace), ReplacementTransform(antideriv_text, cancel_text) ) self.change_area_bounds(0, 0, run_time = 4) self.wait() self.play( ReplacementTransform(big_brace, at_zero.brace), ReplacementTransform(cancel_text, happens_to_be_zero), ) self.wait(2) self.change_area_bounds(0, 8, run_time = 2) self.play( Transform(self.integral, integral_at_eight), Transform(antideriv_diff, antideriv_diff_at_eight), MaintainPositionRelativeTo(at_zero.brace, at_zero), MaintainPositionRelativeTo(happens_to_be_zero, at_zero.brace), ) self.play( GrowFromCenter(at_eight.brace), Write(value_at_eight) ) self.wait(2) self.play(*list(map(FadeOut, [ at_eight.brace, value_at_eight, at_zero.brace, happens_to_be_zero, ]))) self.antideriv_diff = antideriv_diff def show_alternate_antiderivative_difference(self): new_integral = self.get_integral("1", "7") new_antideriv_diff = self.get_antiderivative_difference("1", "7") numbers = [ TexMobject("%d"%d).next_to( self.coords_to_point(d, 0), DOWN, MED_LARGE_BUFF ) for d in (1, 7) ] tex_mobs = [new_integral]+new_antideriv_diff[1::2]+numbers for tex_mob in tex_mobs: tex_mob.set_color_by_tex("1", RED) tex_mob.set_color_by_tex("7", GREEN) tex_mob.set_color_by_tex("\\frac{1}{3}", WHITE) self.change_area_bounds(1, 7, run_time = 2) self.play( self.T_label_group[0].set_fill, None, 0, *list(map(FadeIn, numbers)) ) self.play( Transform(self.integral, new_integral), Transform(self.antideriv_diff, new_antideriv_diff), ) self.wait(3) for part in self.antideriv_diff[1::2]: self.play(Indicate(part, scale_factor = 1.1)) self.wait() def add_constant_to_antiderivative(self): antideriv_diff = self.antideriv_diff plus_fives = VGroup(*[TexMobject("+5") for i in range(2)]) plus_fives.set_color(YELLOW) for five, part in zip(plus_fives, antideriv_diff[1::2]): five.next_to(part, DOWN) group = VGroup( plus_fives[0], antideriv_diff[2].copy(), plus_fives[1] ) 
self.play(Write(plus_fives, run_time = 2)) self.wait(2) self.play( group.arrange, group.next_to, antideriv_diff, DOWN, MED_LARGE_BUFF ) self.wait() self.play(FadeOut(group, run_time = 2)) self.wait() ##### def get_integral(self, lower_bound, upper_bound): result = TexMobject( "\\int", "^"+upper_bound, "_"+lower_bound, "t(8-t)", "\\,dt" ) result.next_to(self.graph_origin, RIGHT, MED_LARGE_BUFF) result.to_edge(UP) return result def get_antiderivative_difference(self, lower_bound, upper_bound): strings = [] for bound in upper_bound, lower_bound: try: d = int(bound) strings.append("(%d)"%d) except: strings.append(bound) parts = [] for s in strings: part = TexMobject( "\\left(", "4", s, "^2", "-", "\\frac{1}{3}", s, "^3" "\\right))" ) part.set_color_by_tex(s, YELLOW, substring = False) parts.append(part) result = VGroup( TexMobject("="), parts[0], TexMobject("-"), parts[1], ) result.left_part, result.right_part = parts result.arrange(RIGHT) result.scale(0.9) result.next_to(self.integral, RIGHT) return result class FundamentalTheorem(GraphScene): CONFIG = { "lower_bound" : 1, "upper_bound" : 7, "lower_bound_color" : RED, "upper_bound_color" : GREEN, "n_riemann_iterations" : 6, } def construct(self): self.add_graph_and_integral() self.show_f_dx_sum() self.show_rects_approaching_area() self.write_antiderivative() self.write_fundamental_theorem_of_calculus() self.show_integral_considering_continuum() self.show_antiderivative_considering_bounds() def add_graph_and_integral(self): self.setup_axes() integral = TexMobject("\\int", "^b", "_a", "f(x)", "\\,dx") integral.next_to(ORIGIN, LEFT) integral.to_edge(UP) integral.set_color_by_tex("a", self.lower_bound_color) integral.set_color_by_tex("b", self.upper_bound_color) graph = self.get_graph( lambda x : -0.01*x*(x-3)*(x-6)*(x-12) + 3, ) self.add(integral, graph) self.graph = graph self.integral = integral self.bound_labels = VGroup() self.v_lines = VGroup() for bound, tex in (self.lower_bound, "a"), (self.upper_bound, "b"): 
label = integral.get_part_by_tex(tex).copy() label.scale(1.5) label.next_to(self.coords_to_point(bound, 0), DOWN) v_line = self.get_vertical_line_to_graph( bound, graph, color = label.get_color() ) self.bound_labels.add(label) self.v_lines.add(v_line) self.add(label, v_line) def show_f_dx_sum(self): kwargs = { "x_min" : self.lower_bound, "x_max" : self.upper_bound, "fill_opacity" : 0.75, "stroke_width" : 0.25, } low_opacity = 0.25 start_rect_index = 3 num_shown_sum_steps = 5 last_rect_index = start_rect_index + num_shown_sum_steps + 1 self.rect_list = self.get_riemann_rectangles_list( self.graph, self.n_riemann_iterations, **kwargs ) rects = self.rects = self.rect_list[0] rects.save_state() start_rect = rects[start_rect_index] f_brace = Brace(start_rect, LEFT, buff = 0) dx_brace = Brace(start_rect, DOWN, buff = 0) f_brace.label = f_brace.get_text("$f(x)$") dx_brace.label = dx_brace.get_text("$dx$") flat_rects = self.get_riemann_rectangles( self.get_graph(lambda x : 0), dx = 0.5, **kwargs ) self.transform_between_riemann_rects( flat_rects, rects, replace_mobject_with_target_in_scene = True, ) self.play(*[ ApplyMethod( rect.set_fill, None, 1 if rect is start_rect else low_opacity ) for rect in rects ]) self.play(*it.chain( list(map(GrowFromCenter, [f_brace, dx_brace])), list(map(Write, [f_brace.label, dx_brace.label])), )) self.wait() for i in range(start_rect_index+1, last_rect_index): self.play( rects[i-1].set_fill, None, low_opacity, rects[i].set_fill, None, 1, f_brace.set_height, rects[i].get_height(), f_brace.next_to, rects[i], LEFT, 0, dx_brace.next_to, rects[i], DOWN, 0, *[ MaintainPositionRelativeTo(brace.label, brace) for brace in (f_brace, dx_brace) ] ) self.wait() self.play(*it.chain( list(map(FadeOut, [ f_brace, dx_brace, f_brace.label, dx_brace.label ])), [rects.set_fill, None, kwargs["fill_opacity"]] )) def show_rects_approaching_area(self): for new_rects in self.rect_list: self.transform_between_riemann_rects( self.rects, new_rects ) def 
write_antiderivative(self): deriv = TexMobject( "{d", "F", "\\over\\,", "dx}", "(x)", "=", "f(x)" ) deriv_F = deriv.get_part_by_tex("F") deriv.next_to(self.integral, DOWN, MED_LARGE_BUFF) rhs = TexMobject(*"=F(b)-F(a)") rhs.set_color_by_tex("a", self.lower_bound_color) rhs.set_color_by_tex("b", self.upper_bound_color) rhs.next_to(self.integral, RIGHT) self.play(Write(deriv)) self.wait(2) self.play(*it.chain( [ ReplacementTransform(deriv_F.copy(), part) for part in rhs.get_parts_by_tex("F") ], [ Write(VGroup(*rhs.get_parts_by_tex(tex))) for tex in "=()-" ] )) for tex in "b", "a": self.play(ReplacementTransform( self.integral.get_part_by_tex(tex).copy(), rhs.get_part_by_tex(tex) )) self.wait() self.wait(2) self.deriv = deriv self.rhs = rhs def write_fundamental_theorem_of_calculus(self): words = TextMobject(""" Fundamental theorem of calculus """) words.to_edge(RIGHT) self.play(Write(words)) self.wait() def show_integral_considering_continuum(self): self.play(*[ ApplyMethod(mob.set_fill, None, 0.2) for mob in (self.deriv, self.rhs) ]) self.play( self.rects.restore, run_time = 3, rate_func = there_and_back ) self.wait() for x in range(2): self.play(*[ ApplyFunction( lambda m : m.shift(MED_SMALL_BUFF*UP).set_fill(opacity = 1), rect, run_time = 3, rate_func = squish_rate_func( there_and_back, alpha, alpha+0.2 ) ) for rect, alpha in zip( self.rects, np.linspace(0, 0.8, len(self.rects)) ) ]) self.wait() def show_antiderivative_considering_bounds(self): self.play( self.integral.set_fill, None, 0.5, self.deriv.set_fill, None, 1, self.rhs.set_fill, None, 1, ) for label, line in reversed(list(zip(self.bound_labels, self.v_lines))): new_line = line.copy().set_color(YELLOW) label.save_state() self.play(label.set_color, YELLOW) self.play(ShowCreation(new_line)) self.play(ShowCreation(line)) self.remove(new_line) self.play(label.restore) self.wait() self.play(self.integral.set_fill, None, 1) self.wait(3) class LetsRecap(TeacherStudentsScene): def construct(self): 
self.teacher_says( "Let's recap", target_mode = "hesitant", ) self.change_student_modes(*["happy"]*3) self.wait(3) class NegativeArea(GraphScene): CONFIG = { "x_axis_label" : "Time", "y_axis_label" : "Velocity", "graph_origin" : 1.5*DOWN + 5*LEFT, "y_min" : -3, "y_max" : 7, "small_dx" : 0.01, "sample_input" : 5, } def construct(self): self.setup_axes() self.add_graph_and_area() self.write_negative_area() self.show_negative_point() self.show_car_going_backwards() self.write_v_dt() self.show_rectangle() self.write_signed_area() def add_graph_and_area(self): graph = self.get_graph( lambda x : -0.02*(x+1)*(x-3)*(x-7)*(x-10), x_min = 0, x_max = 8, color = VELOCITY_COLOR ) area = self.get_riemann_rectangles( graph, x_min = 0, x_max = 8, dx = self.small_dx, start_color = BLUE_D, end_color = BLUE_D, fill_opacity = 0.75, stroke_width = 0, ) self .play( ShowCreation(graph), FadeIn( area, run_time = 2, lag_ratio = 0.5, ) ) self.graph = graph self.area = area def write_negative_area(self): words = TextMobject("Negative area") words.set_color(RED) words.next_to( self.coords_to_point(7, -2), RIGHT, ) arrow = Arrow(words, self.coords_to_point( self.sample_input, -1, )) self.play( Write(words, run_time = 2), ShowCreation(arrow) ) self.wait(2) self.play(*list(map(FadeOut, [self.area, arrow]))) self.negative_area_words = words def show_negative_point(self): v_line = self.get_vertical_line_to_graph( self.sample_input, self.graph, color = RED ) self.play(ShowCreation(v_line)) self.wait() self.v_line = v_line def show_car_going_backwards(self): car = Car() start_point = 3*RIGHT + 2*UP end_point = start_point + LEFT nudged_end_point = end_point + MED_SMALL_BUFF*LEFT car.move_to(start_point) arrow = Arrow(RIGHT, LEFT, color = RED) arrow.next_to(car, UP+LEFT) arrow.shift(MED_LARGE_BUFF*RIGHT) self.play(FadeIn(car)) self.play(ShowCreation(arrow)) self.play(MoveCar( car, end_point, moving_forward = False, run_time = 3 )) self.wait() ghost_car = car.copy().fade() right_nose_line = 
self.get_car_nose_line(car) self.play(ShowCreation(right_nose_line)) self.add(ghost_car) self.play(MoveCar( car, nudged_end_point, moving_forward = False )) left_nose_line = self.get_car_nose_line(car) self.play(ShowCreation(left_nose_line)) self.nose_lines = VGroup(left_nose_line, right_nose_line) self.car = car self.ghost_car = ghost_car def write_v_dt(self): brace = Brace(self.nose_lines, DOWN, buff = 0) equation = TexMobject("ds", "=", "v(t)", "dt") equation.next_to(brace, DOWN, SMALL_BUFF, LEFT) equation.set_color_by_tex("ds", DISTANCE_COLOR) equation.set_color_by_tex("dt", TIME_COLOR) negative = TextMobject("Negative") negative.set_color(RED) negative.next_to(equation.get_corner(UP+RIGHT), UP, LARGE_BUFF) ds_arrow, v_arrow = arrows = VGroup(*[ Arrow( negative.get_bottom(), equation.get_part_by_tex(tex).get_top(), color = RED, ) for tex in ("ds", "v(t)") ]) self.play( GrowFromCenter(brace), Write(equation) ) self.wait() self.play(FadeIn(negative)) self.play(ShowCreation(v_arrow)) self.wait(2) self.play(ReplacementTransform( v_arrow.copy(), ds_arrow )) self.wait(2) self.ds_equation = equation self.negative_word = negative self.negative_word_arrows = arrows def show_rectangle(self): rect_list = self.get_riemann_rectangles_list( self.graph, x_min = 0, x_max = 8, n_iterations = 6, start_color = BLUE_D, end_color = BLUE_D, fill_opacity = 0.75, ) rects = rect_list[0] rect = rects[len(rects)*self.sample_input//8] dt_brace = Brace(rect, UP, buff = 0) v_brace = Brace(rect, LEFT, buff = 0) dt_label = dt_brace.get_text("$dt$", buff = SMALL_BUFF) dt_label.set_color(YELLOW) v_label = v_brace.get_text("$v(t)$", buff = SMALL_BUFF) v_label.add_background_rectangle() self.play(FadeOut(self.v_line), FadeIn(rect)) self.play( GrowFromCenter(dt_brace), GrowFromCenter(v_brace), Write(dt_label), Write(v_label), ) self.wait(2) self.play(*it.chain( [FadeIn(r) for r in rects if r is not rect], list(map(FadeOut, [ dt_brace, v_brace, dt_label, v_label ])) )) self.wait() for new_rects in 
rect_list[1:]: self.transform_between_riemann_rects(rects, new_rects) self.wait() def write_signed_area(self): words = TextMobject("``Signed area''") words.next_to(self.coords_to_point(self.sample_input, 0), UP) symbols = VGroup(*[ TexMobject(sym).move_to(self.coords_to_point(*coords)) for sym, coords in [ ("+", (1, 2)), ("-", (5, -1)), ("+", (7.6, 0.5)), ] ]) self.play(Write(words)) self.play(Write(symbols)) self.wait() #### def get_car_nose_line(self, car): line = DashedLine(car.get_top(), car.get_bottom()) line.move_to(car.get_right()) return line class NextVideo(TeacherStudentsScene): def construct(self): series = VideoSeries() series.to_edge(UP) next_video = series[8] integral = TexMobject("\\int") integral.next_to(next_video, DOWN, LARGE_BUFF) self.play(FadeIn(series, lag_ratio = 0.5)) self.play( next_video.set_color, YELLOW, next_video.shift, next_video.get_height()*DOWN/2, self.teacher.change_mode, "raise_right_hand" ) self.play(Write(integral)) self.wait(5) class Chapter8PatreonThanks(PatreonThanks): CONFIG = { "specific_patrons" : [ "Ali Yahya", "CrypticSwarm", "Kaustuv DeBiswas", "Kathryn Schmiedicke", "Karan Bhargava", "Ankit Agarwal", "Yu Jun", "Dave Nicponski", "Damion Kistler", "Juan Benet", "Othman Alikhan", "Markus Persson", "Dan Buchoff", "Derek Dai", "Joseph John Cox", "Luc Ritchie", "Robert Teed", "Jason Hise", "Meshal Alshammari", "Bernd Sing", "Nils Schneider", "James Thornton", "Mustafa Mahdi", "Jonathan Eppele", "Mathew Bramson", "Jerry Ling", "Mark Govea", "Vecht", "Shimin Kuang", "Rish Kundalia", "Achille Brighton", "Ripta Pasay", ] } class Thumbnail(Chapter1Thumbnail): CONFIG = { "x_axis_label" : "", "y_axis_label" : "", "graph_origin" : 1.5*DOWN + 4*LEFT, "y_axis_height" : 5, "x_max" : 5, "x_axis_width" : 11, } def construct(self): self.setup_axes() self.remove(*self.x_axis.numbers) self.remove(*self.y_axis.numbers) graph = self.get_graph(self.func) rects = self.get_riemann_rectangles( graph, x_min = 0, x_max = 4, dx = 0.25, ) words = 
TextMobject("Integrals") words.set_width(8) words.to_edge(UP) self.add(graph, rects, words)
from_3b1b/old/eoc/chapter8.py
85,414
Sit in carShow speedometerTravelAsk about distanceChange if different from x_min for mob in anim.starting_mobject, anim.mobject: mob.randy.rotate_in_place(np.pi/6)Shrink dt just a bitFade close to 0
202
en
0.635571
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json import requests import six from six.moves.urllib import parse as urlparse class _RequestObjectProxy(object): """A wrapper around a requests.Request that gives some extra information. This will be important both for matching and so that when it's save into the request_history users will be able to access these properties. """ def __init__(self, request, **kwargs): self._request = request self._matcher = None self._url_parts_ = None self._qs = None # All of these params should always exist but we use a default # to make the test setup easier. self._timeout = kwargs.pop('timeout', None) self._allow_redirects = kwargs.pop('allow_redirects', None) self._verify = kwargs.pop('verify', None) self._stream = kwargs.pop('stream', None) self._cert = kwargs.pop('cert', None) self._proxies = copy.deepcopy(kwargs.pop('proxies', {})) # FIXME(jamielennox): This is part of bug #1584008 and should default # to True (or simply removed) in a major version bump. 
self._case_sensitive = kwargs.pop('case_sensitive', False) def __getattr__(self, name): return getattr(self._request, name) @property def _url_parts(self): if self._url_parts_ is None: url = self._request.url if not self._case_sensitive: url = url.lower() self._url_parts_ = urlparse.urlparse(url) return self._url_parts_ @property def scheme(self): return self._url_parts.scheme @property def netloc(self): return self._url_parts.netloc @property def hostname(self): try: return self.netloc.split(':')[0] except IndexError: return '' @property def port(self): components = self.netloc.split(':') try: return int(components[1]) except (IndexError, ValueError): pass if self.scheme == 'https': return 443 if self.scheme == 'http': return 80 # The default return shouldn't matter too much because if you are # wanting to test this value you really should be explicitly setting it # somewhere. 0 at least is a boolean False and an int. return 0 @property def path(self): return self._url_parts.path @property def query(self): return self._url_parts.query @property def qs(self): if self._qs is None: self._qs = urlparse.parse_qs(self.query) return self._qs @property def timeout(self): return self._timeout @property def allow_redirects(self): return self._allow_redirects @property def verify(self): return self._verify @property def stream(self): return self._stream @property def cert(self): return self._cert @property def proxies(self): return self._proxies @classmethod def _create(cls, *args, **kwargs): return cls(requests.Request(*args, **kwargs).prepare()) @property def text(self): body = self.body if isinstance(body, six.binary_type): body = body.decode('utf-8') return body def json(self, **kwargs): return json.loads(self.text, **kwargs) @property def matcher(self): """The matcher that this request was handled by. The matcher object is handled by a weakref. It will return the matcher object if it is still available - so if the mock is still in place. 
If the matcher is not available it will return None. """ return self._matcher() def __str__(self): return "{0.method} {0.url}".format(self._request)
venv/lib/python3.6/site-packages/requests_mock/request.py
4,419
A wrapper around a requests.Request that gives some extra information. This will be important both for matching and so that when it's save into the request_history users will be able to access these properties. The matcher that this request was handled by. The matcher object is handled by a weakref. It will return the matcher object if it is still available - so if the mock is still in place. If the matcher is not available it will return None. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. All of these params should always exist but we use a default to make the test setup easier. FIXME(jamielennox): This is part of bug 1584008 and should default to True (or simply removed) in a major version bump. The default return shouldn't matter too much because if you are wanting to test this value you really should be explicitly setting it somewhere. 0 at least is a boolean False and an int.
1,375
en
0.898641
'''
Acdfg class will have the class definitions for loading and creating
acdfg objects.
'''
from __future__ import print_function

try:
    from enum import Enum
except ImportError:
    from enum34 import Enum

from protobuf.proto_acdfg_pb2 import Acdfg as ProtoAcdfg
import logging

# Python 2/3 compatible string types.  The original code asserted
# `isinstance(name, unicode)`, which raises NameError on Python 3.
try:
    _string_types = (str, unicode)  # noqa: F821 -- Python 2 only
except NameError:
    _string_types = (str,)


class NodeType(Enum):
    """Kinds of nodes that can appear in an ACDFG."""
    regular_node = 1
    data_node = 2
    method_node = 3


class EdgeType(Enum):
    """Kinds of edges that can appear in an ACDFG."""
    control_edge = 1
    def_edge = 2
    use_edge = 3
    transitive_edge = 4
    exceptional_edge = 5


class Node:
    """Base class for ACDFG nodes: a node type plus an integer id."""

    def __init__(self, node_type, key):
        self.node_type = node_type
        self.id = key
        assert isinstance(key, int)

    def get_type(self):
        return self.node_type

    def get_id(self):
        return self.id

    def get_node_type_str(self):
        """Return a human-readable name of this node's type."""
        if self.node_type == NodeType.regular_node:
            return "regular node"
        elif self.node_type == NodeType.data_node:
            return "data node"
        elif self.node_type == NodeType.method_node:
            return "method node"
        else:
            assert False, ' Unhandled node type'


class DataNode(Node):
    """A data node: a named variable or constant with a declared type."""

    DATA_VAR = 0
    DATA_CONST = 1

    def __init__(self, key, name, data_type, data_type_type):
        Node.__init__(self, NodeType.data_node, key)
        self.name = name
        self.data_type = data_type
        # Translate the protobuf enum value into our local constants
        # (hoisted so the protobuf Name() lookup happens only once).
        proto_name = ProtoAcdfg.DataNode.DataType.Name(data_type_type)
        if "DATA_VAR" == proto_name:
            self.data_type_type = DataNode.DATA_VAR
        elif "DATA_CONST" == proto_name:
            self.data_type_type = DataNode.DATA_CONST
        else:
            logging.error("Cannot determine the type %s for data node"
                          % (str(data_type_type)))
            raise Exception("Cannot determine the type %s for data node"
                            % (str(data_type_type)))
        logging.debug('DataNode: (%s,%s,%s,%s)'
                      % (str(key), str(name), str(data_type),
                         str(data_type_type)))

    def get_name(self):
        return self.name

    def get_data_type(self):
        return self.data_type

    def get_data_type_type(self):
        return self.data_type_type


class MethodNode(Node):
    """A method-call node with an optional receiver and DataNode arguments."""

    def __init__(self, key, name, receiver, arg_list):
        Node.__init__(self, NodeType.method_node, key)
        self.name = name
        self.receiver = receiver
        self.arg_list = arg_list
        for a in arg_list:
            assert isinstance(a, DataNode)
        if receiver:
            assert isinstance(receiver, DataNode)
        logging.debug(type(name))
        # BUG FIX: the original asserted `isinstance(name, unicode)`, which is
        # a NameError on Python 3; use the compatibility tuple instead.
        assert isinstance(name, _string_types)
        logging.debug('Method Node: %s,%s' % (str(key), str(name)))

    def get_name(self):
        return self.name

    def get_receiver(self):
        return self.receiver

    def get_args(self):
        return self.arg_list


class Edge:
    """Base class for ACDFG edges: an edge type, id, and src/tgt Nodes."""

    def __init__(self, edge_type, key, src, tgt):
        self.edge_type = edge_type
        self.id = key
        self.src = src
        self.tgt = tgt
        assert isinstance(src, Node)
        assert isinstance(tgt, Node)

    def get_id(self):
        return self.id

    def get_edge_type(self):
        return self.edge_type


class DefEdge(Edge):
    """Definition edge: its target must be a DataNode."""

    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.def_edge, key, src, tgt)
        assert isinstance(tgt, DataNode)


class UseEdge(Edge):
    """Use edge: its source must be a DataNode."""

    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.use_edge, key, src, tgt)
        assert isinstance(src, DataNode)


class ControlEdge(Edge):
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.control_edge, key, src, tgt)


class TransitiveEdge(Edge):
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.transitive_edge, key, src, tgt)


class ExceptionEdge(Edge):
    def __init__(self, key, src, tgt):
        Edge.__init__(self, EdgeType.exceptional_edge, key, src, tgt)


class Acdfg:
    """In-memory ACDFG: node/edge maps built from a protobuf Acdfg message."""

    def __init__(self, acdfg_protobuf_obj):
        self.acdfg_protobuf = acdfg_protobuf_obj
        self.all_nodes = {}
        self.data_nodes = {}
        self.method_nodes = {}
        self.regular_nodes = {}
        self.all_edges = {}

    def add_node(self, node):
        """Register *node*, indexing it both globally and by its kind."""
        assert isinstance(node, Node), \
            'Only node objects can be added through add_node'
        key = node.get_id()
        assert key not in self.all_nodes, \
            'key %d for node already present' % key
        self.all_nodes[key] = node
        if isinstance(node, DataNode):
            self.data_nodes[key] = node
        elif isinstance(node, MethodNode):
            self.method_nodes[key] = node
        else:
            self.regular_nodes[key] = node

    def get_data_nodes(self):
        return self.data_nodes

    def get_method_nodes(self):
        return self.method_nodes

    def add_edge(self, edge):
        """Register *edge* keyed by its id (ids must be unique)."""
        assert isinstance(edge, Edge)
        key = edge.get_id()
        assert key not in self.all_edges, \
            'key %d for edge already present' % key
        self.all_edges[key] = edge

    def get_node_from_id(self, id):
        """Look up a node by id across the per-kind maps; asserts on miss."""
        if id in self.data_nodes:
            return self.data_nodes[id]
        elif id in self.method_nodes:
            return self.method_nodes[id]
        elif id in self.regular_nodes:
            return self.regular_nodes[id]
        else:
            assert False, 'ID: %d not found' % (id)


def get_node_obj_from_ids(acdfg_obj, proto_edge):
    """Resolve a protobuf edge's endpoint ids to Node objects.

    `from` is a Python keyword, hence the getattr for the source field.
    """
    src = acdfg_obj.get_node_from_id(getattr(proto_edge, 'from'))
    tgt = acdfg_obj.get_node_from_id(proto_edge.to)
    return src, tgt


def read_acdfg(filename):
    """Parse a serialized protobuf ACDFG from *filename* into an Acdfg.

    Asserts (after printing a message) when the file cannot be opened.
    """
    try:
        # `with` guarantees the handle is closed even if parsing raises.
        with open(filename, 'rb') as f:
            acdfg = ProtoAcdfg()  # create a new acdfg
            acdfg.ParseFromString(f.read())
        acdfg_obj = Acdfg(acdfg)
        for dNode in acdfg.data_node:
            data_node_obj = DataNode(int(getattr(dNode, 'id')),
                                     dNode.name,
                                     getattr(dNode, 'type'),
                                     dNode.data_type)
            acdfg_obj.add_node(data_node_obj)
        for mNode in acdfg.method_node:
            arg_ids = mNode.argument
            arg_list = [acdfg_obj.get_node_from_id(j) for j in arg_ids]
            if mNode.invokee:
                rcv = acdfg_obj.get_node_from_id(mNode.invokee)
            else:
                rcv = None
            method_node_obj = MethodNode(int(mNode.id), mNode.name,
                                         rcv, arg_list)
            acdfg_obj.add_node(method_node_obj)
        for rNode in acdfg.misc_node:
            misc_node_obj = Node(NodeType.regular_node, int(rNode.id))
            acdfg_obj.add_node(misc_node_obj)
        for ctrl_edge in acdfg.control_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, ctrl_edge)
            cedge_obj = ControlEdge(ctrl_edge.id, src, tgt)
            acdfg_obj.add_edge(cedge_obj)
        for dedge in acdfg.def_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, dedge)
            # BUG FIX: def edges were constructed as ControlEdge objects,
            # discarding the def_edge type (and the DataNode target check).
            dedge_obj = DefEdge(dedge.id, src, tgt)
            acdfg_obj.add_edge(dedge_obj)
        for uedge in acdfg.use_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, uedge)
            uedge_obj = UseEdge(uedge.id, src, tgt)
            acdfg_obj.add_edge(uedge_obj)
        for tedge in acdfg.trans_edge:
            src, tgt = get_node_obj_from_ids(acdfg_obj, tedge)
            tedge_obj = TransitiveEdge(tedge.id, src, tgt)
            acdfg_obj.add_edge(tedge_obj)
        # NOTE(review): exceptional edges are never read from the protobuf
        # here, although ExceptionEdge is defined above -- confirm intended.
        return acdfg_obj
    except IOError:
        print('Could not open: ', filename, 'for reading in binary mode.')
        # NOTE(review): `assert False` vanishes under `python -O`; consider
        # re-raising the IOError instead.
        assert False
python/fixrgraph/annotator/acdfgClass.py
7,922
Acdfg class will have the class definitions for loading and creating acdfg objects import proto_acdfg assert isinstance(node_type, NodeType) def __init__(self, key): self.node_type = NodeType.regular_node self.id = key assert isinstance(key, int) create a new acdfg acdfg.parse_from_bytes(f.read())
311
en
0.356052
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .rule_data_source import RuleDataSource


class RuleManagementEventDataSource(RuleDataSource):
    """A rule management event data source. The discriminator fields is
    always RuleManagementEventDataSource in this case.

    :param resource_uri: the resource identifier of the resource the rule
     monitors.
    :type resource_uri: str
    :param odatatype: Polymorphic Discriminator
    :type odatatype: str
    :param event_name: the event name.
    :type event_name: str
    :param event_source: the event source.
    :type event_source: str
    :param level: the level.
    :type level: str
    :param operation_name: The name of the operation that should be checked
     for. If no name is provided, any operation will match.
    :type operation_name: str
    :param resource_group_name: the resource group name.
    :type resource_group_name: str
    :param resource_provider_name: the resource provider name.
    :type resource_provider_name: str
    :param status: The status of the operation that should be checked for.
     If no status is provided, any status will match.
    :type status: str
    :param sub_status: the substatus.
    :type sub_status: str
    :param claims: the claims.
    :type claims: :class:`RuleManagementEventClaimsDataSource
     <azure.mgmt.monitor.models.RuleManagementEventClaimsDataSource>`
    """

    # Serializer constraints: the polymorphic discriminator is mandatory.
    _validation = {
        'odatatype': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys for (de)serialization.
    # Generated code: these key strings and their order are part of the REST
    # contract -- do not edit by hand.
    _attribute_map = {
        'resource_uri': {'key': 'resourceUri', 'type': 'str'},
        'odatatype': {'key': 'odata\\.type', 'type': 'str'},
        'event_name': {'key': 'eventName', 'type': 'str'},
        'event_source': {'key': 'eventSource', 'type': 'str'},
        'level': {'key': 'level', 'type': 'str'},
        'operation_name': {'key': 'operationName', 'type': 'str'},
        'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
        'resource_provider_name': {'key': 'resourceProviderName', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'sub_status': {'key': 'subStatus', 'type': 'str'},
        'claims': {'key': 'claims', 'type': 'RuleManagementEventClaimsDataSource'},
    }

    def __init__(self, resource_uri=None, event_name=None, event_source=None, level=None, operation_name=None, resource_group_name=None, resource_provider_name=None, status=None, sub_status=None, claims=None):
        # resource_uri is handled by the RuleDataSource base class.
        super(RuleManagementEventDataSource, self).__init__(resource_uri=resource_uri)
        self.event_name = event_name
        self.event_source = event_source
        self.level = level
        self.operation_name = operation_name
        self.resource_group_name = resource_group_name
        self.resource_provider_name = resource_provider_name
        self.status = status
        self.sub_status = sub_status
        # Fixed discriminator value identifying this polymorphic subtype.
        self.odatatype = 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'
azure/mgmt/monitor/models/rule_management_event_data_source.py
3,396
A rule management event data source. The discriminator fields is always RuleManagementEventDataSource in this case. :param resource_uri: the resource identifier of the resource the rule monitors. :type resource_uri: str :param odatatype: Polymorphic Discriminator :type odatatype: str :param event_name: the event name. :type event_name: str :param event_source: the event source. :type event_source: str :param level: the level. :type level: str :param operation_name: The name of the operation that should be checked for. If no name is provided, any operation will match. :type operation_name: str :param resource_group_name: the resource group name. :type resource_group_name: str :param resource_provider_name: the resource provider name. :type resource_provider_name: str :param status: The status of the operation that should be checked for. If no status is provided, any status will match. :type status: str :param sub_status: the substatus. :type sub_status: str :param claims: the claims. :type claims: :class:`RuleManagementEventClaimsDataSource <azure.mgmt.monitor.models.RuleManagementEventClaimsDataSource>` coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. --------------------------------------------------------------------------
1,580
en
0.606781
import argparse
from datetime import datetime
from json import decoder
from os import path, mkdir, remove
from os.path import isfile
from threading import Thread
from time import sleep

try:
    from progress.bar import Bar
    import requests
    import termcolor
except ImportError:
    print("You are missing modules. Run \"python3 -m pip install -r requirements.txt\" to "
          "install them.")
    exit(0)


def status(message):
    """Print *message* prefixed with the current timestamp."""
    print("{0} {1}".format(datetime.now(), message))


def cleanup():
    """Remove any temp files created during runtime."""
    if isfile("runfile"):
        remove("runfile")


def main():
    """Poll pastebin's scraping API, saving new pastes, until told to stop.

    Exits via exit(0) when the API is unreachable or (in non-infinite mode)
    when the runfile disappears.
    """
    # BUG FIX: the original ended each iteration with `Thread(main()).start()`,
    # which actually invoked main() recursively in the *current* thread
    # (growing the call stack until RecursionError) and then started a thread
    # with no target.  A plain loop gives the intended repeat-forever behavior.
    while True:
        status("Fetching latest paste IDs...")

        # fetch latest 100 paste IDs
        fetch_limit = 100
        current_request = requests.get(
            "https://scrape.pastebin.com/api_scraping.php?limit={0}".format(fetch_limit))

        current_json = []
        try:
            current_json = current_request.json()
        except decoder.JSONDecodeError:
            status(termcolor.colored(
                "Unable to fetch latest paste IDs. Make sure your IP is whitelisted at "
                "https://pastebin.com/doc_scraping_api", "red"))
            cleanup()
            exit(0)

        status("Paste IDs fetched. Processing...")

        # drop paste IDs we have already processed
        cleaned_json = [entry for entry in current_json if entry["key"] not in paste_ids]

        # create a progress bar and start downloading pastes if we have new ones.
        # BUG FIX: was `len(cleaned_json) is not 0` -- identity comparison with
        # an int literal is unreliable and a SyntaxWarning on Python 3.8+.
        if cleaned_json:
            with Bar("Processing", max=len(cleaned_json), fill=">") as bar:
                for entry in cleaned_json:
                    # download the raw paste data
                    entry_request = requests.get(
                        "https://scrape.pastebin.com/api_scrape_item.php?i={0}"
                        .format(entry["key"]))
                    entry_content = entry_request.text
                    path_file = path.join("files", "{0}.txt".format(entry["key"]))
                    paste_ids.append(entry["key"])

                    # if we have a provided keyword list, check for keywords.
                    # NOTE(review): in keyword mode, pastes matching no keyword
                    # are indexed but never written to disk -- confirm intended.
                    if keywords is not None:
                        for keyword in keywords:
                            if keyword.upper() in entry_content.upper():
                                bar.suffix = "%(index)d/%(max)d " + termcolor.colored(
                                    "[KEYWORD] Paste \'{0}\' contains "
                                    "keyword \'{1}\'".format(entry["key"], keyword), "green")
                                if args.noSorting is False:
                                    path_file = path.join("files", keyword,
                                                          "{0}.txt".format(entry["key"]))
                                with open(path_file, "w+", encoding='utf-8') as entry_file:
                                    entry_file.write(entry_content)
                                break
                    else:
                        with open(path_file, "w+", encoding='utf-8') as entry_file:
                            entry_file.write(entry_content)
                        bar.suffix = "%(index)d/%(max)d Saving paste \'{0}\'".format(entry["key"])
                    bar.next()
                bar.finish()
        # otherwise, just say that we didn't have any new content
        else:
            status("No new pastes found, skipping downloads...")

        # in non-infinite mode the runfile acts as a kill switch
        if args.infinite is False:
            if not isfile("runfile"):
                print()
                status("Runfile no longer found, exiting...")
                exit(0)

        skipped_pastes = fetch_limit - len(cleaned_json)
        if skipped_pastes != 0:
            status("Skipped {0} previously fetched pastes".format(skipped_pastes))

        # bound the in-memory ID list
        status("Cleaning up internal ID list...")
        while len(paste_ids) > max_id_list_size:
            paste_ids.pop(0)

        # hibernate for 60 seconds before polling again
        status("Hibernating for 60 seconds...")
        with Bar("Hibernating", max=60, fill=">", suffix="") as bar:
            for _ in range(60):
                sleep(1)
                bar.next()
            bar.finish()
        print()


if __name__ == '__main__':
    AUTHOR = "SYRAPT0R"
    COPYRIGHT = "2019-2022"
    VERSION = "0.5.3"

    # parse arguments
    keywords = None
    parser = argparse.ArgumentParser(
        description="A script to scrape pastebin.com with optional keyword search")
    parser.add_argument("-k", "--keywords", help="A file containing keywords for the search")
    parser.add_argument("-i", "--infinite",
                        help="Whether to run in infinite mode (Default: false)",
                        action="store_true", default=False)
    parser.add_argument("-nS", "--noSorting",
                        help="Whether to sort keyword pastes into subdirectories",
                        action="store_true", default=False)
    args = parser.parse_args()

    status("STARTING PASTA SCRAPER {0}, (c) {1} {2}".format(VERSION, COPYRIGHT, AUTHOR))
    print()

    # make sure file directories exists
    if not path.isdir("files"):
        status(termcolor.colored("No file directory found, creating...", "yellow"))
        mkdir("files")

    if args.keywords is not None:
        try:
            with open(args.keywords, "r") as f:
                keywords = f.readlines()
        except IOError:
            status(termcolor.colored("Unable to load specified keyword file. Aborting...", "red"))
            exit(0)

        keywords = [keyword.strip() for keyword in keywords]

        # create subdirectories if required
        if args.noSorting is False:
            for keyword in keywords:
                current_path = path.join("files", keyword)
                if not path.isdir(current_path):
                    status(termcolor.colored("Creating directory {0}".format(current_path),
                                             "yellow"))
                    mkdir(current_path)

        status("Loaded {0} keywords".format(len(keywords)))

    # create paste ID index
    paste_ids = []
    max_id_list_size = 200

    # create non infinite file if needed
    if args.infinite is False:
        status("Creating run file...")
        f = open("runfile", "w+")
        f.close()
    else:
        status("Running in infinite mode...")

    # preparation done, enter main loop
    status("Entering main loop...")
    print()
    main()
scrape.py
6,391
print a message with a time stamp clean any temp files created during runtime main loop fetch latest 100 paste IDs clean up fetched ids create a progress bar and start downloading pastes if we have new ones download the raw paste data if we have a provided keyword list, check for keywords otherwise, just say that we didn't have any new content start 60 second loop parse arguments make sure file directories exists create subdirectories if required create paste ID index create non infinite file if needed preparation done, enter main loop
541
en
0.721259
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Parsplice(CMakePackage): """ParSplice code implements the Parallel Trajectory Splicing algorithm""" homepage = "https://gitlab.com/exaalt/parsplice" url = "https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1" git = "https://gitlab.com/exaalt/parsplice.git" tags = ['ecp', 'ecp-apps'] version('develop', branch='master') version('1.1', '3a72340d49d731a076e8942f2ae2f4e9') depends_on("cmake@3.1:", type='build') depends_on("berkeley-db") depends_on("nauty") depends_on("boost") depends_on("mpi") depends_on("eigen@3:") depends_on("lammps+lib@20170901:") def cmake_args(self): options = ['-DBUILD_SHARED_LIBS=ON'] return options
var/spack/repos/builtin/packages/parsplice/package.py
982
ParSplice code implements the Parallel Trajectory Splicing algorithm Copyright 2013-2019 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT)
259
en
0.673312
class QuizBrain: def __init__(self, questions): self.question_no = 0 self.score = 0 self.questions = questions self.current_question = None def has_more_questions(self): """To check if the quiz has more questions""" return self.question_no < len(self.questions) def next_question(self): """Get the next question by incrementing the question number""" self.current_question = self.questions[self.question_no] self.question_no += 1 q_text = self.current_question.question_text return f"Q.{self.question_no}: {q_text}" def check_answer(self, user_answer): """Check the user answer against the correct answer and maintain the score""" correct_answer = self.current_question.correct_answer if user_answer.lower() == correct_answer.lower(): self.score += 1 return True else: return False def get_score(self): """Get the number of correct answers, wrong answers and score percentage.""" wrong = self.question_no - self.score score_percent = int(self.score / self.question_no * 100) return (self.score, wrong, score_percent)
quiz_brain.py
1,266
Check the user answer against the correct answer and maintain the score Get the number of correct answers, wrong answers and score percentage. To check if the quiz has more questions Get the next question by incrementing the question number
240
en
0.925566
from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import render, setup class DefaultTests(SimpleTestCase): """ Literal string arguments to the default filter are always treated as safe strings, regardless of the auto-escaping state. Note: we have to use {"a": ""} here, otherwise the invalid template variable string interferes with the test result. """ @setup({'default01': '{{ a|default:"x<" }}'}) def test_default01(self): output = render('default01', {"a": ""}) self.assertEqual(output, "x<") @setup({'default02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) def test_default02(self): output = render('default02', {"a": ""}) self.assertEqual(output, "x<") @setup({'default03': '{{ a|default:"x<" }}'}) def test_default03(self): output = render('default03', {"a": mark_safe("x>")}) self.assertEqual(output, "x>") @setup({'default04': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) def test_default04(self): output = render('default04', {"a": mark_safe("x>")}) self.assertEqual(output, "x>") class DefaultIfNoneTests(SimpleTestCase): @setup({'default_if_none01': '{{ a|default:"x<" }}'}) def test_default_if_none01(self): output = render('default_if_none01', {"a": None}) self.assertEqual(output, "x<") @setup({'default_if_none02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'}) def test_default_if_none02(self): output = render('default_if_none02', {"a": None}) self.assertEqual(output, "x<")
tests/template_tests/filter_tests/test_default.py
1,677
Literal string arguments to the default filter are always treated as safe strings, regardless of the auto-escaping state. Note: we have to use {"a": ""} here, otherwise the invalid template variable string interferes with the test result.
239
en
0.729279
"""General-purpose training script for image-to-image translation. This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). It first creates model, dataset, and visualizer given the option. It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. The script supports continue/resume training. Use '--continue_train' to resume your previous training. Example: Train a CycleGAN model: python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan Train a pix2pix model: python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA See options/base_options.py and options/train_options.py for more training options. See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md """ import time from options.train_options import TrainOptions from data import create_dataset from models import create_model from util.visualizer import Visualizer from util import util if __name__ == '__main__': # Create dataset with fixed sample patches opt_eval = TrainOptions().parse() opt_eval.num_train, opt_eval.num_test, opt_eval.eval_mode = 100, 100, True dataset_eval = create_dataset(opt_eval) # create a dataset given opt.eval_mode and other options # Intialize training dataset and model opt = TrainOptions().parse() # get training options dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options dataset_size = len(dataset) # get the number of images in the dataset. 
print('The number of training images = %d' % dataset_size) model = create_model(opt) # create a model given opt.model and other options model.setup(opt) # regular setup: load and print networks; create schedulers visualizer = Visualizer(opt) # create a visualizer that display/save images and plots total_iters = 0 # the total number of training iterations # Evaluate metrics before running the model metrics_log_file = model.save_dir + '/' + opt.name + '_metrics.txt' util.eval_error_metrics(0, model, dataset_eval, log_filename=metrics_log_file) for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq> epoch_start_time = time.time() # timer for entire epoch iter_data_time = time.time() # timer for data loading per iteration epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch for i, data in enumerate(dataset): # inner loop within one epoch iter_start_time = time.time() # timer for computation per iteration if total_iters % opt.print_freq == 0: t_data = iter_start_time - iter_data_time visualizer.reset() total_iters += opt.batch_size epoch_iter += opt.batch_size model.set_input(data) # unpack data from dataset and apply preprocessing model.optimize_parameters() # calculate loss functions, get gradients, update network weights if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file save_result = total_iters % opt.update_html_freq == 0 model.compute_visuals() visualizer.display_current_results(model.get_current_visuals(), epoch, save_result) if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk losses = model.get_current_losses() t_comp = (time.time() - iter_start_time) / opt.batch_size visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data) if opt.display_id > 0: visualizer.plot_current_losses(epoch, 
float(epoch_iter) / dataset_size, losses) if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters)) save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest' model.save_networks(save_suffix) iter_data_time = time.time() if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters)) model.save_networks('latest') model.save_networks(epoch) util.eval_error_metrics(epoch, model, dataset_eval, log_filename=metrics_log_file) print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time)) model.update_learning_rate() # update learning rates at the end of every epoch.
train.py
5,467
General-purpose training script for image-to-image translation. This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). It first creates model, dataset, and visualizer given the option. It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. The script supports continue/resume training. Use '--continue_train' to resume your previous training. Example: Train a CycleGAN model: python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan Train a pix2pix model: python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA See options/base_options.py and options/train_options.py for more training options. See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md Create dataset with fixed sample patches create a dataset given opt.eval_mode and other options Intialize training dataset and model get training options create a dataset given opt.dataset_mode and other options get the number of images in the dataset. 
create a model given opt.model and other options regular setup: load and print networks; create schedulers create a visualizer that display/save images and plots the total number of training iterations Evaluate metrics before running the model outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq> timer for entire epoch timer for data loading per iteration the number of training iterations in current epoch, reset to 0 every epoch inner loop within one epoch timer for computation per iteration unpack data from dataset and apply preprocessing calculate loss functions, get gradients, update network weights display images on visdom and save images to a HTML file print training losses and save logging information to the disk cache our latest model every <save_latest_freq> iterations cache our model every <save_epoch_freq> epochs update learning rates at the end of every epoch.
2,438
en
0.689628
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace import contextlib import copy import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from dataclasses import dataclass, field from omegaconf import MISSING, II, open_dict from typing import Any from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks import FairseqTask from fairseq.models import ( BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, ) from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer @dataclass class Wav2Vec2AsrConfig(FairseqDataclass): w2v_path: str = field( default=MISSING, metadata={"help": "path to wav2vec 2.0 model"} ) no_pretrained_weights: bool = field( default=False, metadata={"help": "if true, does not load pretrained weights"} ) dropout_input: float = field( default=0.0, metadata={"help": "dropout to apply to the input (after feat extr)"}, ) final_dropout: float = field( default=0.0, metadata={"help": "dropout after transformer and before final projection"}, ) dropout: float = field( default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"} ) attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside wav2vec 2.0 model" }, ) activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside wav2vec 2.0 model" }, ) # masking apply_mask: bool = field( default=False, metadata={"help": "apply masking during fine-tuning"} ) mask_length: int = field( default=10, metadata={"help": "repeat the 
mask indices multiple times"} ) mask_prob: float = field( default=0.5, metadata={ "help": "probability of replacing a token with mask (normalized by length)" }, ) mask_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose masks"} ) mask_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indices" }, ) no_mask_overlap: bool = field( default=False, metadata={"help": "whether to allow masks to overlap"} ) # channel masking mask_channel_length: int = field( default=10, metadata={"help": "length of the mask for features (channels)"} ) mask_channel_prob: float = field( default=0.0, metadata={"help": "probability of replacing a feature with 0"} ) mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( default="static", metadata={"help": "how to choose mask length for channel masking"}, ) mask_channel_other: float = field( default=0, metadata={ "help": "secondary mask argument (used for more complex distributions), " "see help in compute_mask_indicesh" }, ) no_mask_channel_overlap: bool = field( default=False, metadata={"help": "whether to allow channel masks to overlap"} ) freeze_finetune_updates: int = field( default=0, metadata={"help": "dont finetune wav2vec for this many updates"} ) feature_grad_mult: float = field( default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"} ) layerdrop: float = field( default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"} ) normalize: bool = II("task.normalize") data: str = II("task.data") # this holds the loaded wav2vec args w2v_args: Any = None @dataclass class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig): pass @register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig) class Wav2VecCtc(BaseFairseqModel): def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel): super().__init__() self.cfg = cfg self.w2v_encoder = w2v_encoder def 
upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict @classmethod def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask): """Build a new model instance.""" w2v_encoder = Wav2VecEncoder(cfg, task.target_dictionary) return cls(cfg, w2v_encoder) def get_normalized_probs(self, net_output, log_probs): """Get normalized probabilities (or log probs) from a net's output.""" logits = net_output["encoder_out"] if log_probs: return utils.log_softmax(logits.float(), dim=-1) else: return utils.softmax(logits.float(), dim=-1) def forward(self, **kwargs): x = self.w2v_encoder(**kwargs) return x @dataclass class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig): decoder_embed_dim: int = field( default=768, metadata={"help": "decoder embedding dimension"} ) decoder_ffn_embed_dim: int = field( default=3072, metadata={"help": "decoder embedding dimension for FFN"} ) decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"}) decoder_layerdrop: float = field( default=0.0, metadata={"help": "decoder layerdrop chance"} ) decoder_attention_heads: int = field( default=4, metadata={"help": "num decoder attention heads"} ) decoder_learned_pos: bool = field( default=False, metadata={"help": "use learned positional embeddings in the decoder"}, ) decoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each decoder block"} ) no_token_positional_embeddings: bool = field( default=False, metadata={ "help": "if set, disables positional embeddings (outside self attention)" }, ) decoder_dropout: float = field( default=0.0, metadata={"help": "dropout probability in the decoder"} ) decoder_attention_dropout: float = field( default=0.0, metadata={ "help": "dropout probability for attention weights inside the decoder" }, ) decoder_activation_dropout: float = field( default=0.0, metadata={ "help": "dropout probability after activation in FFN inside the decoder" }, ) 
max_target_positions: int = field( default=2048, metadata={"help": "max target positions"} ) share_decoder_input_output_embed: bool = field( default=False, metadata={"help": "share decoder input and output embeddings"} ) @register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig) class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask): """Build a new model instance.""" src_dict, tgt_dict = task.source_dictionary, task.target_dictionary def build_embedding(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) return emb decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim) encoder = cls.build_encoder(cfg) decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) return Wav2Vec2Seq2SeqModel(encoder, decoder) @classmethod def build_encoder(cls, cfg: Wav2Vec2AsrConfig): return Wav2VecEncoder(cfg) @classmethod def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens): return TransformerDecoder(cfg, tgt_dict, embed_tokens) def forward(self, **kwargs): encoder_out = self.encoder(tbc=False, **kwargs) decoder_out = self.decoder(encoder_out=encoder_out, **kwargs) return decoder_out def upgrade_state_dict_named(self, state_dict, name): super().upgrade_state_dict_named(state_dict, name) return state_dict class Wav2VecEncoder(FairseqEncoder): def __init__(self, cfg: Wav2Vec2AsrConfig, tgt_dict=None): self.apply_mask = cfg.apply_mask arg_overrides = { "dropout": cfg.dropout, "activation_dropout": cfg.activation_dropout, "dropout_input": cfg.dropout_input, "attention_dropout": cfg.attention_dropout, "mask_length": cfg.mask_length, "mask_prob": cfg.mask_prob, "mask_selection": cfg.mask_selection, "mask_other": cfg.mask_other, "no_mask_overlap": cfg.no_mask_overlap, "mask_channel_length": 
cfg.mask_channel_length, "mask_channel_prob": cfg.mask_channel_prob, "mask_channel_selection": cfg.mask_channel_selection, "mask_channel_other": cfg.mask_channel_other, "no_mask_channel_overlap": cfg.no_mask_channel_overlap, "encoder_layerdrop": cfg.layerdrop, "feature_grad_mult": cfg.feature_grad_mult, } if cfg.w2v_args is None: state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides) w2v_args = state.get("cfg", None) if w2v_args is None: w2v_args = convert_namespace_to_omegaconf(state["args"]) cfg.w2v_args = w2v_args else: state = None w2v_args = cfg.w2v_args if isinstance(w2v_args, Namespace): cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args) assert cfg.normalize == w2v_args.task.normalize, ( "Fine-tuning works best when data normalization is the same. " "Please check that --normalize is set or unset for both pre-training and here" ) w2v_args.task.data = cfg.data task = tasks.setup_task(w2v_args.task) model = task.build_model(w2v_args.model) if state is not None and not cfg.no_pretrained_weights: model.load_state_dict(state["model"], strict=True) model.remove_pretraining_modules() super().__init__(task.source_dictionary) d = w2v_args.model.encoder_embed_dim self.w2v_model = model self.final_dropout = nn.Dropout(cfg.final_dropout) self.freeze_finetune_updates = cfg.freeze_finetune_updates self.num_updates = 0 if tgt_dict is not None: self.proj = Linear(d, len(tgt_dict)) elif getattr(cfg, "decoder_embed_dim", d) != d: self.proj = Linear(d, cfg.decoder_embed_dim) else: self.proj = None def set_num_updates(self, num_updates): """Set the number of parameters updates.""" super().set_num_updates(num_updates) self.num_updates = num_updates def forward(self, source, padding_mask, tbc=True, **kwargs): w2v_args = { "source": source, "padding_mask": padding_mask, "mask": self.apply_mask and self.training, } ft = self.freeze_finetune_updates <= self.num_updates with torch.no_grad() if not ft else contextlib.ExitStack(): x, padding_mask = 
self.w2v_model.extract_features(**w2v_args) if tbc: # B x T x C -> T x B x C x = x.transpose(0, 1) x = self.final_dropout(x) if self.proj: x = self.proj(x) return { "encoder_out": x, # T x B x C "encoder_padding_mask": padding_mask, # B x T "padding_mask": padding_mask, } def reorder_encoder_out(self, encoder_out, new_order): if encoder_out["encoder_out"] is not None: encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(0, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return None def upgrade_state_dict_named(self, state_dict, name): return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__( self, cfg: Wav2Vec2Seq2SeqConfig, dictionary, embed_tokens, no_encoder_attn=False, ): super().__init__(dictionary) self.dropout = cfg.decoder_dropout self.share_input_output_embed = cfg.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = cfg.decoder_embed_dim self.output_embed_dim = cfg.decoder_embed_dim self.layerdrop = cfg.decoder_layerdrop padding_idx = embed_tokens.padding_idx self.max_target_positions = cfg.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = ( Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None ) self.embed_positions = ( PositionalEmbedding( cfg.max_target_positions, embed_dim, padding_idx, learned=cfg.decoder_learned_pos, ) if not cfg.no_token_positional_embeddings else None ) # TODO: update this when transformer gets converted to dataclass configs transformer_cfg = copy.deepcopy(cfg) with open_dict(transformer_cfg): transformer_cfg.dropout = transformer_cfg.decoder_dropout transformer_cfg.attention_dropout = ( transformer_cfg.decoder_attention_dropout ) transformer_cfg.activation_dropout = ( transformer_cfg.decoder_activation_dropout ) self.layers = nn.ModuleList([]) self.layers.extend( [ TransformerDecoderLayer(transformer_cfg, no_encoder_attn) for _ in range(transformer_cfg.decoder_layers) ] ) if not self.share_input_output_embed: self.embed_out = nn.Parameter( torch.Tensor(len(dictionary), self.output_embed_dim) ) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if transformer_cfg.decoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the 
encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ prev_output_tokens = prev_output_tokens.long() x, extra = self.extract_features( prev_output_tokens, encoder_out, incremental_state ) x = self.output_layer(x) return x, extra def extract_features( self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused ): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: dropout_probability = np.random.random() if not self.training or (dropout_probability > self.layerdrop): x, attn, _ = layer( x, encoder_out["encoder_out"] if encoder_out is not None else None, encoder_out["encoder_padding_mask"] if encoder_out is not None else None, incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) return x, {"attn": attn, "inner_states": inner_states} def output_layer(self, features, **kwargs): """Project features to 
the vocabulary size.""" # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device or self._future_mask.size(0) < dim ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): return state_dict def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
fairseq/models/wav2vec/wav2vec2_asr.py
20,506
Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). Build a new model instance. Build a new model instance. Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs Get normalized probabilities (or log probs) from a net's output. Maximum input length supported by the encoder. Maximum output length supported by the decoder. Project features to the vocabulary size. Set the number of parameters updates. Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. masking channel masking this holds the loaded wav2vec args B x T x C -> T x B x C T x B x C B x T todo: try with input_embed_dim TODO: update this when transformer gets converted to dataclass configs embed positions embed tokens and positions B x T x C -> T x B x C decoder layers T x B x C -> B x T x C project back to size of vocabulary
1,876
en
0.662981
""" Django settings for webappexample project. Generated by 'django-admin startproject' using Django 1.11.4. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ from dotenv import load_dotenv, find_dotenv import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '*dn4z%$4b6-d1+epmb=hd1m3g#$*1*%&%x+4m_8*cvakee%=7q' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'social_django', 'auth0login' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'webappexample.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'webappexample.wsgi.application' # Database # 
https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/' ENV_FILE = find_dotenv() if ENV_FILE: load_dotenv(ENV_FILE) # SOCIAL AUTH AUTH0 BACKEND CONFIG SOCIAL_AUTH_TRAILING_SLASH = False SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID') SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET') SOCIAL_AUTH_AUTH0_SCOPE = [ 'openid', 'profile' ] SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN') AUDIENCE = None if os.environ.get('AUTH0_AUDIENCE'): AUDIENCE = os.environ.get('AUTH0_AUDIENCE') else: if SOCIAL_AUTH_AUTH0_DOMAIN: AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo' if AUDIENCE: SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE} AUTHENTICATION_BACKENDS = { 'auth0login.auth0backend.Auth0', 'django.contrib.auth.backends.ModelBackend' } LOGIN_URL = '/login/auth0' LOGIN_REDIRECT_URL = '/dashboard'
01-Login/webappexample/settings.py
4,045
Django settings for webappexample project. Generated by 'django-admin startproject' using Django 1.11.4. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.11/ref/settings/databases Password validation https://docs.djangoproject.com/en/1.11/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/1.11/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.11/howto/static-files/ SOCIAL AUTH AUTH0 BACKEND CONFIG
1,035
en
0.628364
""" Author: thangbk2209 Project: Autoscaling Created: 3/15/19 16:48 Purpose: """ import random import os import matplotlib import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler, StandardScaler import tensorflow as tf from config import * def draw_time_series(data, title, x_label, y_label, file_name): plt.plot(data) plt.title(title) plt.ylabel(y_label) plt.xlabel(x_label) # plt.legend([/], loc='upper left') plt.savefig(file_name + '.png') plt.show() plt.close() def get_scaler(scaler_method): if scaler_method == 'min_max_scaler': return MinMaxScaler(feature_range=(0, 1)) if scaler_method == 'standard_scaler': return StandardScaler() else: print(f'|-> ERROR: Not support {scaler_method}') def get_activation(activation_name): if activation_name == 'sigmoid': return tf.nn.sigmoid elif activation_name == 'relu': return tf.nn.relu elif activation_name == 'tanh': return tf.nn.tanh elif activation_name == 'elu': return tf.nn.elu else: print(">>> Can not apply your activation <<<") def get_optimizer(optimizer_name, lr): if optimizer_name == 'momentum': return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9) elif optimizer_name == 'adam': return tf.train.AdamOptimizer(learning_rate=lr) elif optimizer_name == 'rmsprop': return tf.train.RMSPropOptimizer(learning_rate=lr) else: print(">>> Can not apply your optimizer <<<") def early_stopping_decision(array, patience): value = array[len(array) - patience - 1] arr = array[len(array) - patience:] check = 0 for val in arr: if(val > value): check += 1 if(check == patience): return False else: return True def draw_train_loss(loss_train, loss_valid, save_path): plt.plot(loss_train) plt.plot(loss_valid) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig(save_path) plt.close() def average(arr): return sum(arr) / len(arr) def create_name(**kwargs): key = list(kwargs.keys()) # collect the first key in kwargs dict name = [] for 
_key in key: value = str(kwargs[_key]).replace('[', '') value = value.replace(']', '') _name = f'{_key}_{value}' name.append(_name) return '-'.join(name) def generate_units_size(network_size, layer_size): assert network_size > 0, 'Network size invalid' assert layer_size > 0, 'Layer size invalid' num_units = [] for i in range(network_size): # num_units.append(random.choice(range(1, layer_size, 1))) num_units.append(int(layer_size)) if layer_size != 2: layer_size /= 2 return num_units def compute_scale_fitness_value(upper_prediction, lower_prediction, real_value): rate_real_value_in_prediction_interval = 0 num_sample = len(upper_prediction) for i in range(num_sample): _real_value = real_value[i][0] lower_border = lower_prediction[i] higher_border = upper_prediction[i] if _real_value <= higher_border and _real_value >= lower_border: rate_real_value_in_prediction_interval += 1 / num_sample return rate_real_value_in_prediction_interval def gen_folder_in_path(path): path_component = path.split('/') path_infor = '' for _path_component in path_component: path_infor += f'/{_path_component}' if not os.path.exists(path_infor): os.mkdir(path_infor) assert os.path.exists(path_infor), f'Can not generate folder in path {path}'
lib/includes/utility.py
3,753
Author: thangbk2209 Project: Autoscaling Created: 3/15/19 16:48 Purpose: plt.legend([/], loc='upper left') collect the first key in kwargs dict num_units.append(random.choice(range(1, layer_size, 1)))
203
en
0.466186
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. 
" "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. """ global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. 
Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ def check_docstring(func): if func.__module__.startswith("detectron2."): assert ( func.__doc__ is not None and "experimental" in func.__doc__.lower() ), f"configurable {func} should be marked experimental" if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." check_docstring(init_func) @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" 
def wrapper(orig_func): check_docstring(orig_func) @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ if len(args) and isinstance(args[0], _CfgNode): return True if isinstance(kwargs.pop("cfg", None), _CfgNode): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False
detectron2/config/config.py
9,000
The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. Use `from_config` to obtain explicit arguments. Returns: dict: arguments to be used for cls.__init__ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. Returns: str: a yaml string representation of the config Get a copy of the default config. Returns: a detectron2 CfgNode instance. Let the global config point to the given cfg. 
Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. -*- coding: utf-8 -*- Copyright (c) Facebook, Inc. and its affiliates. Note that the default value of allow_unsafe is changed to True defaults.py needs to import CfgNode compat.py needs to import CfgNode To convert, first obtain a full config at an old version to make it show up in docs @configurable() is made equivalent to @configurable forward all arguments to from_config, if from_config accepts them forward supported arguments to from_config forward the other arguments to __init__ `from_config`'s first argument is forced to be "cfg". So the above check covers all cases.
3,105
en
0.654374
#!/usr/bin/env python3 """ This script builds the ASKE deliverable reports as PDFs by combining the markdown files, using pandoc. Usage: ./build_report.py <report_name> """ import os, sys from glob import glob import subprocess as sp def transform_line(line): # Transform headers - numbered headings are not supported in Jekyll, # and we want them for the LaTeX output. if line.startswith("#"): header_level = line.split()[0] line = line.replace(header_level, header_level[:-1]) if line.split()[1][0].isdigit(): line = "# " + " ".join(line.split()[2:]) # Skip captions intended for web if line.startswith("**Figure"): line="" # Transform math expression delimiters line = line.replace("$$", "$") # Recursively include markdown files if "include_relative" in line: filename = line.split()[2] with open(filename, "r") as f: line = "\n" + "".join([transform_line(line) for line in f]) return line def build_report(report_name): """ Apply appropriate transformations to markdown files so that they can be compiled properly into a PDF report via LaTeX. """ with open("index.md", "r") as f: lines = [transform_line(line) for line in f] with open("index_transformed.md", "w") as f: f.writelines(lines) sp.call( [ "pandoc", "--template", "../pandoc_report_template.tex", "--pdf-engine", "lualatex", "-V", f"reportname={report_name}", "-N", "-f", "markdown+tex_math_dollars", "index_transformed.md", "-o", f"{report_name}.tex", ] ) sp.call(["latexmk","-lualatex",f"{report_name}.tex"]) os.remove("index_transformed.md") if __name__ == "__main__": report_name = sys.argv[1] if report_name.endswith("/"): report_name = report_name[:-1] cwd = os.getcwd() os.chdir(report_name) build_report(report_name) os.rename(report_name+".pdf", "../"+report_name+".pdf") os.chdir(cwd)
documentation/deliverable_reports/build_report.py
2,150
Apply appropriate transformations to markdown files so that they can be compiled properly into a PDF report via LaTeX. This script builds the ASKE deliverable reports as PDFs by combining the markdown files, using pandoc. Usage: ./build_report.py <report_name> !/usr/bin/env python3 Transform headers - numbered headings are not supported in Jekyll, and we want them for the LaTeX output. Skip captions intended for web Transform math expression delimiters Recursively include markdown files
498
en
0.77251
# # PySNMP MIB module Juniper-MPLS-CONF (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-MPLS-CONF # Produced by pysmi-0.3.4 at Wed May 1 14:03:27 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint") juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents") AgentCapabilities, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "ModuleCompliance", "NotificationGroup") MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, TimeTicks, Counter32, NotificationType, Integer32, Bits, ObjectIdentity, Gauge32, Counter64, Unsigned32, MibIdentifier, iso, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "Integer32", "Bits", "ObjectIdentity", "Gauge32", "Counter64", "Unsigned32", "MibIdentifier", "iso", "IpAddress") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") juniMplsAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51)) juniMplsAgent.setRevisions(('2004-06-11 21:36', '2003-01-24 18:34', '2002-11-04 15:47', '2001-12-05 21:41',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: juniMplsAgent.setRevisionsDescriptions(('Added agent capabilities definitions for MPLS-LSR-STD-MIB.', 
'Replaced Unisphere names with Juniper names. Added IP TTL Propagate object to the MPLS scalar group.', 'Added RowStatus support to the minor layer and the tunnel profile groups.', 'The initial release of this management information module.',)) if mibBuilder.loadTexts: juniMplsAgent.setLastUpdated('200406231509Z') if mibBuilder.loadTexts: juniMplsAgent.setOrganization('Juniper Networks, Inc.') if mibBuilder.loadTexts: juniMplsAgent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: mib@Juniper.net') if mibBuilder.loadTexts: juniMplsAgent.setDescription('The agent capabilities definitions for the MultiProtocol Label Switching (MPLS) component of the SNMP agent in the Juniper E-series family of products.') juniMplsAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 1)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV1 = juniMplsAgentV1.setProductRelease('Version 1 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component was supported in\n JUNOSe 4.0 system releases.') if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV1 = juniMplsAgentV1.setStatus('obsolete') if mibBuilder.loadTexts: juniMplsAgentV1.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when new RowStatus objects were added to the tables in juniMplsMinorLayerConfGroup and juniMplsTunnelProfileConfGroup.') juniMplsAgentV2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 2)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV2 = juniMplsAgentV2.setProductRelease('Version 2 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. 
This version of the MPLS component was supported in\n JUNOSe 4.1 and subsequent 4.x system releases.') if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV2 = juniMplsAgentV2.setStatus('obsolete') if mibBuilder.loadTexts: juniMplsAgentV2.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when the IP TTL Propagate object was added to the MPLS scalar group.') juniMplsAgentV3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 3)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV3 = juniMplsAgentV3.setProductRelease('Version 3 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 5.0 and subsequent system releases.') if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV3 = juniMplsAgentV3.setStatus('obsolete') if mibBuilder.loadTexts: juniMplsAgentV3.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when some of the objects in that MIB became obsolete.') juniMplsAgentV4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 4)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV4 = juniMplsAgentV4.setProductRelease('Version 4 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 6.0 and subsequent system releases.') if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV4 = juniMplsAgentV4.setStatus('obsolete') if mibBuilder.loadTexts: juniMplsAgentV4.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. 
These capabilities became obsolete when the MPLS-LSR-STD-MIB support is added.') juniMplsAgentV5 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 5)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV5 = juniMplsAgentV5.setProductRelease('Version 5 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 6.1 and subsequent system releases.') if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV5 = juniMplsAgentV5.setStatus('obsolete') if mibBuilder.loadTexts: juniMplsAgentV5.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe.') juniMplsAgentV6 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 6)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV6 = juniMplsAgentV6.setProductRelease('Version 6 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 7.1 and subsequent system releases.') if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): juniMplsAgentV6 = juniMplsAgentV6.setStatus('current') if mibBuilder.loadTexts: juniMplsAgentV6.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe.') mibBuilder.exportSymbols("Juniper-MPLS-CONF", juniMplsAgentV1=juniMplsAgentV1, juniMplsAgentV6=juniMplsAgentV6, juniMplsAgentV3=juniMplsAgentV3, juniMplsAgentV2=juniMplsAgentV2, juniMplsAgentV5=juniMplsAgentV5, PYSNMP_MODULE_ID=juniMplsAgent, juniMplsAgentV4=juniMplsAgentV4, juniMplsAgent=juniMplsAgent)
pysnmp-with-texts/Juniper-MPLS-CONF.py
7,376
PySNMP MIB module Juniper-MPLS-CONF (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-MPLS-CONF Produced by pysmi-0.3.4 at Wed May 1 14:03:27 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
328
en
0.467309
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*- # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for optimizing pytd syntax trees. pytd files come from various sources, and are typically redundant (duplicate functions, different signatures saying the same thing, overlong type disjunctions). The Visitors in this file remove various forms of these redundancies. """ import collections import logging from pytype import utils from pytype.pytd import abc_hierarchy from pytype.pytd import booleq from pytype.pytd import mro from pytype.pytd import pytd from pytype.pytd import pytd_utils from pytype.pytd import type_match from pytype.pytd import visitors import six log = logging.getLogger(__name__) class RenameUnknowns(visitors.Visitor): """Give unknowns that map to the same set of concrete types the same name.""" def __init__(self, mapping): super(RenameUnknowns, self).__init__() self.name_to_cls = {name: hash(cls) for name, cls in mapping.items()} self.cls_to_canonical_name = { cls: name for name, cls in self.name_to_cls.items()} def VisitClassType(self, node): if node.name.startswith("~unknown"): return pytd.ClassType( self.cls_to_canonical_name[self.name_to_cls[node.name]], None) else: return node class RemoveDuplicates(visitors.Visitor): """Remove duplicate function signatures. 
For example, this transforms def f(x: int) -> float def f(x: int) -> float to def f(x: int) -> float In order to be removed, a signature has to be exactly identical to an existing one. """ def VisitFunction(self, node): # We remove duplicates, but keep existing entries in the same order. return node.Replace( signatures=tuple(pytd_utils.OrderedSet(node.signatures))) class RemoveRedundantSignatures(visitors.Visitor): """Remove duplicate function signatures. For example, this transforms def f(x: int) -> float def f(x: int or float) -> float to def f(x: int or float) -> float In order to be removed, a signature has to be "contained" (a subclass of) an existing one. """ def __init__(self, hierarchy): super(RemoveRedundantSignatures, self).__init__() self.match = type_match.TypeMatch(hierarchy.GetSuperClasses(), any_also_is_bottom=False) self.subst = {} def EnterClass(self, cls): # Preserve the identify of each type parameter, and don't # allow them to match against anything by themselves. self.subst = {p.type_param: pytd.NamedType("$" + p.name) for p in cls.template} def LeaveClass(self, _): self.subst = {} def VisitFunction(self, node): new_signatures = [] matches = set() # We keep track of which signature matched which other signatures, purely # for optimization - that way we don't have to query the reverse direction. for i, s1 in enumerate(node.signatures): for j, s2 in enumerate(node.signatures): if i != j and (j, i) not in matches: if s1.exceptions or s2.exceptions: # We don't support matching of exceptions. continue if s1.template: # type_match doesn't support polymorphic functions on the # left side yet. continue if self.match.match(s1, s2, self.subst) == booleq.TRUE: matches.add((i, j)) break else: new_signatures.append(s1) return node.Replace(signatures=tuple(new_signatures)) class SimplifyUnions(visitors.Visitor): """Remove duplicate or redundant entries in union types. For example, this transforms a: int or int b: int or ? 
c: int or (int or float) to a: int b: ? c: int or float """ def VisitUnionType(self, union): return pytd_utils.JoinTypes(union.type_list) class _ReturnsAndExceptions(object): """Mutable class for collecting return types and exceptions of functions. The collecting is stable: Items are kept in the order in which they were encountered. Attributes: return_types: Return types seen so far. exceptions: Exceptions seen so far. """ def __init__(self): self.return_types = [] self.exceptions = [] def Update(self, signature): """Add the return types / exceptions of a signature to this instance.""" if signature.return_type not in self.return_types: self.return_types.append(signature.return_type) self.exceptions.extend(exception for exception in signature.exceptions if exception not in self.exceptions) class CombineReturnsAndExceptions(visitors.Visitor): """Group function signatures that only differ in exceptions or return values. For example, this transforms def f(x: int) -> float: raise OverflowError() def f(x: int) -> int: raise IndexError() to def f(x: int) -> float or int: raise IndexError() raise OverflowError() """ def _GroupByArguments(self, signatures): """Groups signatures by arguments. Arguments: signatures: A list of function signatures (Signature instances). Returns: A dictionary mapping signatures (without return and exceptions) to a tuple of return values and exceptions. """ groups = collections.OrderedDict() # Signature -> ReturnsAndExceptions for sig in signatures: stripped_signature = sig.Replace(return_type=None, exceptions=None) ret = groups.get(stripped_signature) if not ret: ret = _ReturnsAndExceptions() groups[stripped_signature] = ret ret.Update(sig) return groups def VisitFunction(self, f): """Merge signatures of a function. This groups signatures by arguments and then for each group creates a single signature that joins the return values / exceptions using "or". 
Arguments: f: A pytd.Function instance Returns: Function with simplified / combined signatures. """ groups = self._GroupByArguments(f.signatures) new_signatures = [] for stripped_signature, ret_exc in groups.items(): ret = pytd_utils.JoinTypes(ret_exc.return_types) exc = tuple(ret_exc.exceptions) new_signatures.append( stripped_signature.Replace(return_type=ret, exceptions=exc) ) return f.Replace(signatures=tuple(new_signatures)) class CombineContainers(visitors.Visitor): """Change unions of containers to containers of unions. For example, this transforms list[int] or list[float] to list[int or float] . """ _CONTAINER_NAMES = { pytd.TupleType: ("__builtin__.tuple", "typing.Tuple"), pytd.CallableType: ("typing.Callable",), } def _key(self, t): if isinstance(t, (pytd.CallableType, pytd.TupleType)): return (t.base_type, len(t.parameters)) else: return t.base_type def _should_merge(self, pytd_type, union): """Determine whether pytd_type values in the union should be merged. If the union contains the homogeneous flavor of pytd_type (e.g., GenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type values of different lengths, we want to turn all of the pytd_type values into homogeneous ones so that they can be merged into a single container. Args: pytd_type: The pytd type, either TupleType or CallableType. union: a pytd.UnionType Returns: True if the pytd_type values should be merged, False otherwise. """ names = self._CONTAINER_NAMES[pytd_type] length = None for t in union.type_list: if isinstance(t, pytd_type): if length is None: length = len(t.parameters) elif length != len(t.parameters): return True elif (isinstance(t, pytd.GenericType) and t.base_type.name in names): return True return False def VisitUnionType(self, union): """Push unions down into containers. This collects similar container types in unions and merges them into single instances with the union type pushed down to the element_type level. Arguments: union: A pytd.Union instance. 
Might appear in a parameter, a return type, a constant type, etc. Returns: A simplified pytd.Union. """ if not any(isinstance(t, pytd.GenericType) for t in union.type_list): # Optimization: If we're not going to change anything, return original. return union union = pytd_utils.JoinTypes(union.type_list) # flatten if not isinstance(union, pytd.UnionType): union = pytd.UnionType((union,)) merge_tuples = self._should_merge(pytd.TupleType, union) merge_callables = self._should_merge(pytd.CallableType, union) if merge_tuples or merge_callables: type_list = [] for t in union.type_list: if merge_tuples and isinstance(t, pytd.TupleType): t = pytd.GenericType(base_type=t.base_type, parameters=(pytd.UnionType(t.parameters),)) elif merge_callables and isinstance(t, pytd.CallableType): t = pytd.GenericType(base_type=t.base_type, parameters=(pytd.AnythingType(), t.ret)) type_list.append(t) union = union.Replace(type_list=tuple(type_list)) collect = {} has_redundant_base_types = False for t in union.type_list: if isinstance(t, pytd.GenericType): key = self._key(t) if key in collect: has_redundant_base_types = True collect[key] = tuple( pytd_utils.JoinTypes([p1, p2]) for p1, p2 in zip(collect[key], t.parameters)) else: collect[key] = t.parameters if not has_redundant_base_types: return union result = pytd.NothingType() done = set() for t in union.type_list: if isinstance(t, pytd.GenericType): key = self._key(t) if key in done: continue # already added parameters = collect[key] add = t.Replace(parameters=tuple(p.Visit(CombineContainers()) for p in parameters)) done.add(key) else: add = t result = pytd_utils.JoinTypes([result, add]) return result class Factorize(visitors.Visitor): """Opposite of ExpandSignatures. Factorizes cartesian products of functions. 
For example, this transforms def f(x: int, y: int) def f(x: int, y: float) def f(x: float, y: int) def f(x: float, y: float) to def f(x: int or float, y: int or float) """ def _GroupByOmittedArg(self, signatures, i): """Group functions that are identical if you ignore one of the arguments. Arguments: signatures: A list of function signatures i: The index of the argument to ignore during comparison. Returns: A list of tuples (signature, types). "signature" is a signature with argument i omitted, "types" is the list of types that argument was found to have. signatures that don't have argument i are represented as (original, None). """ groups = collections.OrderedDict() for sig in signatures: if i >= len(sig.params): # We can't omit argument i, because this signature has too few # arguments. Represent this signature as (original, None). groups[sig] = None continue if sig.params[i].mutated_type is not None: # We can't group mutable parameters. Leave this signature alone. groups[sig] = None continue # Set type of parameter i to None params = list(sig.params) param_i = params[i] params[i] = param_i.Replace(type=None) stripped_signature = sig.Replace(params=tuple(params)) existing = groups.get(stripped_signature) if existing: existing.append(param_i.type) else: groups[stripped_signature] = [param_i.type] return groups.items() def VisitFunction(self, f): """Shrink a function, by factorizing cartesian products of arguments. Greedily groups signatures, looking at the arguments from left to right. This algorithm is *not* optimal. But it does the right thing for the typical cases. Arguments: f: An instance of pytd.Function. If this function has more than one signature, we will try to combine some of these signatures by introducing union types. Returns: A new, potentially optimized, instance of pytd.Function. 
""" max_argument_count = max(len(s.params) for s in f.signatures) signatures = f.signatures for i in six.moves.xrange(max_argument_count): new_sigs = [] for sig, types in self._GroupByOmittedArg(signatures, i): if types: # One or more options for argument <i>: new_params = list(sig.params) new_params[i] = sig.params[i].Replace( type=pytd_utils.JoinTypes(types)) sig = sig.Replace(params=tuple(new_params)) new_sigs.append(sig) else: # Signature doesn't have argument <i>, so we store the original: new_sigs.append(sig) signatures = new_sigs return f.Replace(signatures=tuple(signatures)) class ApplyOptionalArguments(visitors.Visitor): """Removes functions that are instances of a more specific case. For example, this reduces def f(x: int, ...) # [1] def f(x: int, y: int) # [2] to just def f(x: int, ...) Because "..." makes it possible to pass any additional arguments to [1], it encompasses both declarations, hence we can omit [2]. """ def _HasShorterVersion(self, sig, optional_arg_sigs): """Find a shorter signature with optional arguments for a longer signature. Arguments: sig: The function signature we'd like to shorten optional_arg_sigs: A set of function signatures with optional arguments that will be matched against sig. Returns: True if there is a shorter signature that generalizes sig, but is not identical to sig. """ param_count = len(sig.params) if not sig.has_optional: param_count += 1 # also consider f(x, y, ...) for f(x, y) for i in six.moves.xrange(param_count): if sig.params[0:i] in optional_arg_sigs: return True return False def VisitFunction(self, f): """Remove all signatures that have a shorter version. We use signatures with optional argument (has_opt=True) as template and then match all signatures against those templates, removing those that match. Arguments: f: An instance of pytd.Function Returns: A potentially simplified instance of pytd.Function. """ # Set of signatures that can replace longer ones. 
Only used for matching, # hence we can use an unordered data structure. optional_arg_sigs = frozenset(s.params for s in f.signatures if s.has_optional) new_signatures = (s for s in f.signatures if not self._HasShorterVersion(s, optional_arg_sigs)) return f.Replace(signatures=tuple(new_signatures)) class SuperClassHierarchy(object): """Utility class for optimizations working with superclasses.""" def __init__(self, superclasses): self._superclasses = superclasses self._subclasses = utils.invert_dict(self._superclasses) def GetSuperClasses(self): return self._superclasses def _CollectSuperclasses(self, type_name, collect): """Recursively collect super classes for a type. Arguments: type_name: A string, the type's name. collect: A set() of strings, modified to contain all superclasses. """ collect.add(type_name) superclasses = [name for name in self._superclasses.get(type_name, [])] # The superclasses might have superclasses of their own, so recurse. for superclass in superclasses: self._CollectSuperclasses(superclass, collect) def ExpandSuperClasses(self, t): """Generate a list of all (known) superclasses for a type. Arguments: t: A type name. E.g. "int". Returns: A set of types. This set includes t as well as all its superclasses. For example, this will return "bool", "int" and "object" for "bool". """ superclasses = set() self._CollectSuperclasses(t, superclasses) return superclasses def ExpandSubClasses(self, t): """Generate a set of all (known) subclasses for a type. Arguments: t: A type. E.g. NamedType("int"). Returns: A set of types. This set includes t as well as all its subclasses. For example, this will return "int" and "bool" for "int". 
""" queue = [t] seen = set() while queue: item = queue.pop() if item not in seen: seen.add(item) queue.extend(self._subclasses[item]) return seen def HasSubClassInSet(self, cls, known): """Queries whether a subclass of a type is present in a given set.""" return any(sub in known for sub in self._subclasses[cls]) def HasSuperClassInSet(self, cls, known): """Queries whether a superclass of a type is present in a given set.""" return any(sub in known for sub in self._superclasses[cls]) class SimplifyUnionsWithSuperclasses(visitors.Visitor): """Simplify Unions with superclasses. E.g., this changes int or bool to int since bool is a subclass of int. (Interpreting types as "sets of values", this simplification is sound since A union B = A, if B is a subset of A.) """ def __init__(self, hierarchy): super(SimplifyUnionsWithSuperclasses, self).__init__() self.hierarchy = hierarchy def VisitUnionType(self, union): c = collections.Counter() for t in set(union.type_list): # TODO(rechen): How can we make this work with GenericType? if isinstance(t, pytd.GENERIC_BASE_TYPE): c += collections.Counter(self.hierarchy.ExpandSubClasses(str(t))) # Below, c[str[t]] can be zero - that's the default for non-existent items # in collections.Counter. It'll happen for types that are not # instances of GENERIC_BASE_TYPE, like container types. new_type_list = [t for t in union.type_list if c[str(t)] <= 1] return pytd_utils.JoinTypes(new_type_list) class FindCommonSuperClasses(visitors.Visitor): """Find common super classes. Optionally also uses abstract base classes. E.g., this changes def f(x: list or tuple, y: frozenset or set) -> int or float to def f(x: Sequence, y: Set) -> Real """ def __init__(self, hierarchy): super(FindCommonSuperClasses, self).__init__() self.hierarchy = hierarchy def VisitUnionType(self, union): """Given a union type, try to find a simplification by using superclasses. This is a lossy optimization that tries to map a list of types to a common base type. 
For example, int and bool are both base classes of int, so it would convert "int or bool" to "int". Arguments: union: A union type. Returns: A simplified type, if available. """ intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0])) for t in union.type_list[1:]: intersection.intersection_update( self.hierarchy.ExpandSuperClasses(str(t))) # Remove "redundant" superclasses, by removing everything from the tree # that's not a leaf. I.e., we don't need "object" if we have more # specialized types. new_type_list = tuple( pytd.NamedType(cls) for cls in intersection if not self.hierarchy.HasSubClassInSet(cls, intersection)) if not new_type_list: return union # if types don't intersect, leave them alone return pytd_utils.JoinTypes(new_type_list) class CollapseLongUnions(visitors.Visitor): """Shortens long unions to object (or "?"). Poor man's version of FindCommonSuperClasses. Shorten types like "str or unicode or int or float or list" to just "object" or "?". Additionally, if the union already contains at least one "object", we also potentially replace the entire union with just "object". Attributes: max_length: The maximum number of types to allow in a union. If there are more types than this, it is shortened. 
""" def __init__(self, max_length=7): assert isinstance(max_length, six.integer_types) super(CollapseLongUnions, self).__init__() self.generic_type = pytd.AnythingType() self.max_length = max_length def VisitUnionType(self, union): if len(union.type_list) > self.max_length: return self.generic_type elif self.generic_type in union.type_list: return self.generic_type else: return union class AdjustGenericType(visitors.Visitor): """Changes the generic type from "object" to "Any".""" def __init__(self): super(AdjustGenericType, self).__init__() self.old_generic_type = pytd.ClassType("__builtin__.object") self.new_generic_type = pytd.AnythingType() def VisitClassType(self, t): if t == self.old_generic_type: return self.new_generic_type else: return t class AdjustReturnAndConstantGenericType(visitors.Visitor): """Changes "object" to "Any" in return and constant types.""" def VisitSignature(self, sig): return sig.Replace(return_type=sig.return_type.Visit(AdjustGenericType())) def VisitConstant(self, c): return c.Replace(type=c.type.Visit(AdjustGenericType())) class AddInheritedMethods(visitors.Visitor): """Copy methods and constants from base classes into their derived classes. E.g. this changes class Bar: [methods and constants of Bar] class Foo(Bar): [methods and constants of Foo] to class Bar: [methods and constants of Bar] class Foo(Bar): [methods and constants of Bar] [methods and constants of Foo] . This is not an optimization by itself, but it can help with other optimizations (like signature merging), and is also useful as preprocessor for type matching. """ def VisitLateType(self, _): raise NotImplementedError("Can't use AddInheritedMethods with LateType.") def VisitClass(self, cls): """Add superclass methods and constants to this Class.""" if any(base for base in cls.parents if isinstance(base, pytd.NamedType)): raise AssertionError("AddInheritedMethods needs a resolved AST") # Filter out only the types we can reason about. 
# TODO(kramm): Do we want handle UnionTypes and GenericTypes at some point? bases = [base.cls for base in cls.parents if isinstance(base, pytd.ClassType)] # Don't pull in methods that are named the same as existing methods in # this class, local methods override parent class methods. names = {m.name for m in cls.methods} | {c.name for c in cls.constants} # TODO(kramm): This should do full-blown MRO. adjust_self = visitors.AdjustSelf(force=True) adjust_self.class_types.append(visitors.ClassAsType(cls)) new_methods = list(cls.methods) for base in bases: for m in base.methods: if m.name not in names: new_methods.append(m.Visit(adjust_self)) new_constants = list(cls.constants) for base in bases: for c in base.constants: if c.name not in names: new_constants.append(c) return cls.Replace(methods=tuple(new_methods), constants=tuple(new_constants)) class RemoveInheritedMethods(visitors.Visitor): """Removes methods from classes if they also exist in their superclass. E.g. this changes class A: def f(self, y: int) -> bool class B(A): def f(self, y: int) -> bool to class A: def f(self, y: int) -> bool class B(A): pass . """ def __init__(self): super(RemoveInheritedMethods, self).__init__() self.class_to_stripped_signatures = {} def _StrippedSignatures(self, t): """Given a class, list method name + signature without "self". Args: t: A pytd.TYPE. Returns: A set of name + signature tuples, with the self parameter of the signature removed. """ if not isinstance(t, pytd.ClassType): # For union types, generic types etc., inheritance is more complicated. # Be conservative and default to not removing methods inherited from # those. 
return {} stripped_signatures = {} for method in t.cls.methods: for sig in method.signatures: if (sig.params and sig.params[0].name == "self" and isinstance(sig.params[0].type, pytd.ClassType)): stripped_signatures[method.name] = ( sig.Replace(params=sig.params[1:]), method.is_abstract) return stripped_signatures def _FindNameAndSig(self, classes, name, sig): """Find a tuple(name, signature) in all methods of a type/class.""" if classes: t = classes[0] classes = classes[1:] if t not in self.class_to_stripped_signatures: self.class_to_stripped_signatures[t] = self._StrippedSignatures(t) if name in self.class_to_stripped_signatures[t]: return sig == self.class_to_stripped_signatures[t][name] return self._FindNameAndSig(classes, name, sig) return False def _MaybeRemoveSignature(self, name, sig, is_abstract): """Visit a Signature and return None if we can remove it.""" if (not sig.params or sig.params[0].name != "self" or not isinstance(sig.params[0].type, pytd.ClassType)): return sig # Not a method cls = sig.params[0].type.cls if cls is None: # TODO(kramm): Remove once pytype stops generating ClassType(name, None). 
return sig try: if self._FindNameAndSig( mro.GetBasesInMRO(cls), name, (sig.Replace(params=sig.params[1:]), is_abstract)): return None # remove (see VisitFunction) except mro.MROError: return sig return sig def _MaybeDeleteFunction(self, f): """Visit a Function and return None if we can remove it.""" signatures = tuple(self._MaybeRemoveSignature(f.name, sig, f.is_abstract) for sig in f.signatures) if any(signatures): if signatures.count(None): return f.Replace( signatures=tuple(s for s in signatures if s is not None)) else: return f # unchanged else: return None # delete function def VisitClass(self, cls): methods = tuple(self._MaybeDeleteFunction(m) for m in cls.methods) if methods.count(None): return cls.Replace(methods=tuple(m for m in methods if m is not None)) else: return cls # unchanged class PullInMethodClasses(visitors.Visitor): """Simplifies classes with only a __call__ function to just a method. This transforms class Foo: m: Bar class Bar: def __call__(self: Foo, ...) to class Foo: def m(self, ...) . """ def __init__(self): super(PullInMethodClasses, self).__init__() self._module = None self._total_count = collections.defaultdict(int) self._processed_count = collections.defaultdict(int) def _MaybeLookup(self, t): if isinstance(t, pytd.NamedType): try: return self._module.Lookup(t.name) except KeyError: return None elif isinstance(t, pytd.ClassType): return t.cls else: return None def _HasSelf(self, sig): """True if a signature has a self parameter. This only checks for the name, since the type can be too many different things (type of the method, type of the parent class, object, unknown etc.) and doesn't carry over to the simplified version, anyway. Arguments: sig: Function signature (instance of pytd.Signature) Returns: True if the signature has "self". 
""" return sig.params and sig.params[0].name == "self" def _LookupIfSimpleCall(self, t): """Looks up the type if it has only one method, "__call__".""" if not isinstance(t, (pytd.NamedType, pytd.ClassType)): # We only do this for simple types. return None cls = self._MaybeLookup(t) if not isinstance(cls, pytd.Class): # This is not a class or it doesn't exist, so assume it's not a method. return None if [f.name for f in cls.methods] != ["__call__"]: return None method, = cls.methods return cls if all(self._HasSelf(sig) for sig in method.signatures) else None def _CanDelete(self, cls): """Checks whether this class can be deleted. Returns whether all occurences of this class as a type were due to constants we removed. Arguments: cls: A pytd.Class. Returns: True if we can delete this class. """ if not self._processed_count[cls.name]: # Leave standalone classes alone. E.g. the pytd files in # pytd/builtins/ defines classes not used by anything else. return False return self._processed_count[cls.name] == self._total_count[cls.name] def EnterTypeDeclUnit(self, module): # Since modules are hierarchical, we enter TypeDeclUnits multiple times- # but we only want to record the top-level one. 
if not self._module: self._module = module def VisitTypeDeclUnit(self, unit): return unit.Replace(classes=tuple(c for c in unit.classes if not self._CanDelete(c))) def VisitClassType(self, t): self._total_count[t.name] += 1 return t def VisitNamedType(self, t): self._total_count[t.name] += 1 return t def VisitClass(self, cls): """Visit a class, and change constants to methods where possible.""" new_constants = [] new_methods = list(cls.methods) adjust_self = visitors.AdjustSelf(force=True) adjust_self.class_types.append(visitors.ClassAsType(cls)) for const in cls.constants: c = self._LookupIfSimpleCall(const.type) if c: signatures = c.methods[0].signatures self._processed_count[c.name] += 1 new_method = pytd.Function(const.name, signatures, c.methods[0].kind) new_methods.append(new_method.Visit(adjust_self)) else: new_constants.append(const) # keep return cls.Replace(constants=tuple(new_constants), methods=tuple(new_methods)) class AbsorbMutableParameters(visitors.Visitor): """Converts mutable parameters to unions. This is lossy. For example, this will change def f(x: list[int]): x = list[int or float] to def f(x: list[int] or list[int or float]) . (Use optimize.CombineContainers to then change x to list[int or float].) This also works for methods - it will then potentially change the type of "self". The resulting AST is temporary and needs careful handling. """ def VisitParameter(self, p): if p.mutated_type is None: return p else: return p.Replace(type=pytd_utils.JoinTypes([p.type, p.mutated_type]), mutated_type=None) class SimplifyContainers(visitors.Visitor): """Simplifies containers whose type parameters are all Any. For example, this will change def f() -> List[any] to def f() -> list Note that we don't simplify TupleType or CallableType, since they have variable-length parameters, and the parameter length is meaningful even when the parameters are all Any. 
""" def _Simplify(self, t): if all(isinstance(p, pytd.AnythingType) for p in t.parameters): return t.base_type else: return t def VisitGenericType(self, t): return self._Simplify(t) class TypeParameterScope(visitors.Visitor): """Common superclass for optimizations that track type parameters.""" def __init__(self): super(TypeParameterScope, self).__init__() self.type_params_stack = [{}] def EnterClass(self, cls): new = self.type_params_stack[-1].copy() new.update({t.type_param: cls for t in cls.template}) self.type_params_stack.append(new) def EnterSignature(self, sig): new = self.type_params_stack[-1].copy() new.update({t.type_param: sig for t in sig.template}) self.type_params_stack.append(new) def IsClassTypeParameter(self, type_param): class_or_sig = self.type_params_stack[-1].get(type_param) return isinstance(class_or_sig, pytd.Class) def IsFunctionTypeParameter(self, type_param): class_or_sig = self.type_params_stack[-1].get(type_param) return isinstance(class_or_sig, pytd.Signature) def LeaveClass(self, _): self.type_params_stack.pop() def LeaveSignature(self, _): self.type_params_stack.pop() class MergeTypeParameters(TypeParameterScope): """Remove all function type parameters in a union with a class type param. For example, this will change class A(typing.Generic(T)): def append(self, T or T2) -> T2 to class A(typing.Generic(T)): def append(self, T) -> T . Use this visitor after using AbsorbMutableParameters. 
As another example, the combination of AbsorbMutableParameters and MergeTypeParameters transforms class list(typing.Generic(T)): def append(self, v: T2) -> NoneType: self = T or T2 to class list(typing.Generic(T')): def append(self, V:T') -> NoneType by creating a *new* template variable T' that propagates the mutations to the outermost level (in this example, T' = T or T2) """ def __init__(self): super(MergeTypeParameters, self).__init__() self.type_param_union = None def _AppendNew(self, l1, l2): """Appends all items to l1 that are not in l2.""" # l1 and l2 are small (2-3 elements), so just use two loops. for e2 in l2: if not any(e1 is e2 for e1 in l1): l1.append(e2) def EnterSignature(self, node): # Necessary because TypeParameterScope also defines this function super(MergeTypeParameters, self).EnterSignature(node) assert self.type_param_union is None self.type_param_union = collections.defaultdict(list) def LeaveSignature(self, node): # Necessary because TypeParameterScope also defines this function super(MergeTypeParameters, self).LeaveSignature(node) self.type_param_union = None def VisitUnionType(self, u): type_params = [t for t in u.type_list if isinstance(t, pytd.TypeParameter)] for t in type_params: if self.IsFunctionTypeParameter(t): self._AppendNew(self.type_param_union[t.name], type_params) return u def _AllContaining(self, type_param, seen=None): """Gets all type parameters that are in a union with the passed one.""" seen = seen or set() result = [type_param] for other in self.type_param_union[type_param.name]: if other in seen: continue # break cycles seen.add(other) self._AppendNew(result, self._AllContaining(other, seen) or [other]) return result def _ReplaceByOuterIfNecessary(self, item, substitutions): """Potentially replace a function type param with a class type param. Args: item: A pytd.TemplateItem substitutions: A dictionary to update with what we replaced. Returns: Either [item] or []. 
""" containing_union = self._AllContaining(item.type_param) if not containing_union: return [item] class_type_parameters = [type_param for type_param in containing_union if self.IsClassTypeParameter(type_param)] if class_type_parameters: substitutions[item.type_param] = pytd_utils.JoinTypes( class_type_parameters) return [] else: # It's a function type parameter that appears in a union with other # function type parameters. # TODO(kramm): We could merge those, too. return [item] def VisitSignature(self, sig): new_template = [] substitutions = {k: k for k in self.type_params_stack[-1]} for item in sig.template: new_template += self._ReplaceByOuterIfNecessary(item, substitutions) if sig.template == new_template: return sig # Nothing changed. else: return sig.Replace(template=tuple(new_template)).Visit( visitors.ReplaceTypeParameters(substitutions)).Visit(SimplifyUnions()) def Optimize(node, builtins=None, lossy=False, use_abcs=False, max_union=7, remove_mutable=False, can_do_lookup=True): """Optimize a PYTD tree. Tries to shrink a PYTD tree by applying various optimizations. Arguments: node: A pytd node to be optimized. It won't be modified - this function will return a new node. builtins: Definitions of all of the external types in node. lossy: Allow optimizations that change the meaning of the pytd. use_abcs: Use abstract base classes to represent unions like e.g. "float or int" as "Real". max_union: How many types we allow in a union before we simplify it to just "object". remove_mutable: Whether to simplify mutable parameters to normal parameters. can_do_lookup: True: We're either allowed to try to resolve NamedType instances in the AST, or the AST is already resolved. False: Skip any optimizations that would require NamedTypes to be resolved. Returns: An optimized node. 
""" node = node.Visit(RemoveDuplicates()) node = node.Visit(SimplifyUnions()) node = node.Visit(CombineReturnsAndExceptions()) node = node.Visit(Factorize()) node = node.Visit(ApplyOptionalArguments()) node = node.Visit(CombineContainers()) node = node.Visit(SimplifyContainers()) if builtins: superclasses = builtins.Visit(visitors.ExtractSuperClassesByName()) superclasses.update(node.Visit(visitors.ExtractSuperClassesByName())) if use_abcs: superclasses.update(abc_hierarchy.GetSuperClasses()) hierarchy = SuperClassHierarchy(superclasses) node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy)) if lossy: node = node.Visit(FindCommonSuperClasses(hierarchy)) if max_union: node = node.Visit(CollapseLongUnions(max_union)) node = node.Visit(AdjustReturnAndConstantGenericType()) if remove_mutable: node = node.Visit(AbsorbMutableParameters()) node = node.Visit(CombineContainers()) node = node.Visit(MergeTypeParameters()) node = node.Visit(visitors.AdjustSelf()) node = node.Visit(SimplifyContainers()) if builtins and can_do_lookup: node = visitors.LookupClasses(node, builtins, ignore_late_types=True) node = node.Visit(RemoveInheritedMethods()) node = node.Visit(RemoveRedundantSignatures(hierarchy)) return node
pytype/pytd/optimize.py
39,279
Converts mutable parameters to unions. This is lossy. For example, this will change def f(x: list[int]): x = list[int or float] to def f(x: list[int] or list[int or float]) . (Use optimize.CombineContainers to then change x to list[int or float].) This also works for methods - it will then potentially change the type of "self". The resulting AST is temporary and needs careful handling. Copy methods and constants from base classes into their derived classes. E.g. this changes class Bar: [methods and constants of Bar] class Foo(Bar): [methods and constants of Foo] to class Bar: [methods and constants of Bar] class Foo(Bar): [methods and constants of Bar] [methods and constants of Foo] . This is not an optimization by itself, but it can help with other optimizations (like signature merging), and is also useful as preprocessor for type matching. Changes the generic type from "object" to "Any". Changes "object" to "Any" in return and constant types. Removes functions that are instances of a more specific case. For example, this reduces def f(x: int, ...) # [1] def f(x: int, y: int) # [2] to just def f(x: int, ...) Because "..." makes it possible to pass any additional arguments to [1], it encompasses both declarations, hence we can omit [2]. Shortens long unions to object (or "?"). Poor man's version of FindCommonSuperClasses. Shorten types like "str or unicode or int or float or list" to just "object" or "?". Additionally, if the union already contains at least one "object", we also potentially replace the entire union with just "object". Attributes: max_length: The maximum number of types to allow in a union. If there are more types than this, it is shortened. Change unions of containers to containers of unions. For example, this transforms list[int] or list[float] to list[int or float] . Group function signatures that only differ in exceptions or return values. 
For example, this transforms def f(x: int) -> float: raise OverflowError() def f(x: int) -> int: raise IndexError() to def f(x: int) -> float or int: raise IndexError() raise OverflowError() Opposite of ExpandSignatures. Factorizes cartesian products of functions. For example, this transforms def f(x: int, y: int) def f(x: int, y: float) def f(x: float, y: int) def f(x: float, y: float) to def f(x: int or float, y: int or float) Find common super classes. Optionally also uses abstract base classes. E.g., this changes def f(x: list or tuple, y: frozenset or set) -> int or float to def f(x: Sequence, y: Set) -> Real Remove all function type parameters in a union with a class type param. For example, this will change class A(typing.Generic(T)): def append(self, T or T2) -> T2 to class A(typing.Generic(T)): def append(self, T) -> T . Use this visitor after using AbsorbMutableParameters. As another example, the combination of AbsorbMutableParameters and MergeTypeParameters transforms class list(typing.Generic(T)): def append(self, v: T2) -> NoneType: self = T or T2 to class list(typing.Generic(T')): def append(self, V:T') -> NoneType by creating a *new* template variable T' that propagates the mutations to the outermost level (in this example, T' = T or T2) Simplifies classes with only a __call__ function to just a method. This transforms class Foo: m: Bar class Bar: def __call__(self: Foo, ...) to class Foo: def m(self, ...) . Remove duplicate function signatures. For example, this transforms def f(x: int) -> float def f(x: int) -> float to def f(x: int) -> float In order to be removed, a signature has to be exactly identical to an existing one. Removes methods from classes if they also exist in their superclass. E.g. this changes class A: def f(self, y: int) -> bool class B(A): def f(self, y: int) -> bool to class A: def f(self, y: int) -> bool class B(A): pass . Remove duplicate function signatures. 
For example, this transforms def f(x: int) -> float def f(x: int or float) -> float to def f(x: int or float) -> float In order to be removed, a signature has to be "contained" (a subclass of) an existing one. Give unknowns that map to the same set of concrete types the same name. Simplifies containers whose type parameters are all Any. For example, this will change def f() -> List[any] to def f() -> list Note that we don't simplify TupleType or CallableType, since they have variable-length parameters, and the parameter length is meaningful even when the parameters are all Any. Remove duplicate or redundant entries in union types. For example, this transforms a: int or int b: int or ? c: int or (int or float) to a: int b: ? c: int or float Simplify Unions with superclasses. E.g., this changes int or bool to int since bool is a subclass of int. (Interpreting types as "sets of values", this simplification is sound since A union B = A, if B is a subset of A.) Utility class for optimizations working with superclasses. Common superclass for optimizations that track type parameters. Mutable class for collecting return types and exceptions of functions. The collecting is stable: Items are kept in the order in which they were encountered. Attributes: return_types: Return types seen so far. exceptions: Exceptions seen so far. Generate a set of all (known) subclasses for a type. Arguments: t: A type. E.g. NamedType("int"). Returns: A set of types. This set includes t as well as all its subclasses. For example, this will return "int" and "bool" for "int". Generate a list of all (known) superclasses for a type. Arguments: t: A type name. E.g. "int". Returns: A set of types. This set includes t as well as all its superclasses. For example, this will return "bool", "int" and "object" for "bool". Queries whether a subclass of a type is present in a given set. Queries whether a superclass of a type is present in a given set. Optimize a PYTD tree. 
Tries to shrink a PYTD tree by applying various optimizations. Arguments: node: A pytd node to be optimized. It won't be modified - this function will return a new node. builtins: Definitions of all of the external types in node. lossy: Allow optimizations that change the meaning of the pytd. use_abcs: Use abstract base classes to represent unions like e.g. "float or int" as "Real". max_union: How many types we allow in a union before we simplify it to just "object". remove_mutable: Whether to simplify mutable parameters to normal parameters. can_do_lookup: True: We're either allowed to try to resolve NamedType instances in the AST, or the AST is already resolved. False: Skip any optimizations that would require NamedTypes to be resolved. Returns: An optimized node. Add the return types / exceptions of a signature to this instance. Add superclass methods and constants to this Class. Visit a class, and change constants to methods where possible. Merge signatures of a function. This groups signatures by arguments and then for each group creates a single signature that joins the return values / exceptions using "or". Arguments: f: A pytd.Function instance Returns: Function with simplified / combined signatures. Shrink a function, by factorizing cartesian products of arguments. Greedily groups signatures, looking at the arguments from left to right. This algorithm is *not* optimal. But it does the right thing for the typical cases. Arguments: f: An instance of pytd.Function. If this function has more than one signature, we will try to combine some of these signatures by introducing union types. Returns: A new, potentially optimized, instance of pytd.Function. Remove all signatures that have a shorter version. We use signatures with optional argument (has_opt=True) as template and then match all signatures against those templates, removing those that match. Arguments: f: An instance of pytd.Function Returns: A potentially simplified instance of pytd.Function. 
Push unions down into containers. This collects similar container types in unions and merges them into single instances with the union type pushed down to the element_type level. Arguments: union: A pytd.Union instance. Might appear in a parameter, a return type, a constant type, etc. Returns: A simplified pytd.Union. Given a union type, try to find a simplification by using superclasses. This is a lossy optimization that tries to map a list of types to a common base type. For example, int and bool are both base classes of int, so it would convert "int or bool" to "int". Arguments: union: A union type. Returns: A simplified type, if available. Gets all type parameters that are in a union with the passed one. Appends all items to l1 that are not in l2. Checks whether this class can be deleted. Returns whether all occurences of this class as a type were due to constants we removed. Arguments: cls: A pytd.Class. Returns: True if we can delete this class. Recursively collect super classes for a type. Arguments: type_name: A string, the type's name. collect: A set() of strings, modified to contain all superclasses. Find a tuple(name, signature) in all methods of a type/class. Groups signatures by arguments. Arguments: signatures: A list of function signatures (Signature instances). Returns: A dictionary mapping signatures (without return and exceptions) to a tuple of return values and exceptions. Group functions that are identical if you ignore one of the arguments. Arguments: signatures: A list of function signatures i: The index of the argument to ignore during comparison. Returns: A list of tuples (signature, types). "signature" is a signature with argument i omitted, "types" is the list of types that argument was found to have. signatures that don't have argument i are represented as (original, None). True if a signature has a self parameter. 
This only checks for the name, since the type can be too many different things (type of the method, type of the parent class, object, unknown etc.) and doesn't carry over to the simplified version, anyway. Arguments: sig: Function signature (instance of pytd.Signature) Returns: True if the signature has "self". Find a shorter signature with optional arguments for a longer signature. Arguments: sig: The function signature we'd like to shorten optional_arg_sigs: A set of function signatures with optional arguments that will be matched against sig. Returns: True if there is a shorter signature that generalizes sig, but is not identical to sig. Looks up the type if it has only one method, "__call__". Visit a Function and return None if we can remove it. Visit a Signature and return None if we can remove it. Potentially replace a function type param with a class type param. Args: item: A pytd.TemplateItem substitutions: A dictionary to update with what we replaced. Returns: Either [item] or []. Given a class, list method name + signature without "self". Args: t: A pytd.TYPE. Returns: A set of name + signature tuples, with the self parameter of the signature removed. Determine whether pytd_type values in the union should be merged. If the union contains the homogeneous flavor of pytd_type (e.g., GenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type values of different lengths, we want to turn all of the pytd_type values into homogeneous ones so that they can be merged into a single container. Args: pytd_type: The pytd type, either TupleType or CallableType. union: a pytd.UnionType Returns: True if the pytd_type values should be merged, False otherwise. Functions for optimizing pytd syntax trees. pytd files come from various sources, and are typically redundant (duplicate functions, different signatures saying the same thing, overlong type disjunctions). The Visitors in this file remove various forms of these redundancies. 
-*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*- Copyright 2013 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. We remove duplicates, but keep existing entries in the same order. Preserve the identify of each type parameter, and don't allow them to match against anything by themselves. We keep track of which signature matched which other signatures, purely for optimization - that way we don't have to query the reverse direction. We don't support matching of exceptions. type_match doesn't support polymorphic functions on the left side yet. Signature -> ReturnsAndExceptions Optimization: If we're not going to change anything, return original. flatten already added We can't omit argument i, because this signature has too few arguments. Represent this signature as (original, None). We can't group mutable parameters. Leave this signature alone. Set type of parameter i to None One or more options for argument <i>: Signature doesn't have argument <i>, so we store the original: also consider f(x, y, ...) for f(x, y) Set of signatures that can replace longer ones. Only used for matching, hence we can use an unordered data structure. The superclasses might have superclasses of their own, so recurse. TODO(rechen): How can we make this work with GenericType? Below, c[str[t]] can be zero - that's the default for non-existent items in collections.Counter. It'll happen for types that are not instances of GENERIC_BASE_TYPE, like container types. 
Remove "redundant" superclasses, by removing everything from the tree that's not a leaf. I.e., we don't need "object" if we have more specialized types. if types don't intersect, leave them alone Filter out only the types we can reason about. TODO(kramm): Do we want handle UnionTypes and GenericTypes at some point? Don't pull in methods that are named the same as existing methods in this class, local methods override parent class methods. TODO(kramm): This should do full-blown MRO. For union types, generic types etc., inheritance is more complicated. Be conservative and default to not removing methods inherited from those. Not a method TODO(kramm): Remove once pytype stops generating ClassType(name, None). remove (see VisitFunction) unchanged delete function unchanged We only do this for simple types. This is not a class or it doesn't exist, so assume it's not a method. Leave standalone classes alone. E.g. the pytd files in pytd/builtins/ defines classes not used by anything else. Since modules are hierarchical, we enter TypeDeclUnits multiple times- but we only want to record the top-level one. keep l1 and l2 are small (2-3 elements), so just use two loops. Necessary because TypeParameterScope also defines this function Necessary because TypeParameterScope also defines this function break cycles It's a function type parameter that appears in a union with other function type parameters. TODO(kramm): We could merge those, too. Nothing changed.
15,545
en
0.817564
""" --- Ångström --- Read, manipulate and analyze molecular trajectory files. """ from .read import read_xyz_traj from .write import write_xyz_traj from angstrom.geometry import get_molecule_center from angstrom import Molecule import numpy as np import os class Trajectory: """ Reading and analyzing trajectories in xyz format. """ def __init__(self, atoms=None, coordinates=None, read=None, molecule=None): """ Create a trajectory object. Parameters ---------- atoms : list or None List of elements of the molecule for each frame. coordinates : list or None List of atomic positions of the molecule for each frame. read : str or None File name to read molecule file (formats: xyz). molecule : Molecule Create a Trajectory with 1 frame from a Molecule object. """ self.name = 'Trajectory' if atoms is not None and coordinates is not None: self.atoms = atoms self.coordinates = coordinates elif read is not None: self.read(read) elif molecule is not None: self.atoms = np.array([molecule.atoms]) self.coordinates = np.array([molecule.coordinates]) self.name = molecule.name else: self.atoms = [] self.coordinates = [] self.current_frame = 0 def __repr__(self): """ Returns basic trajectory info. """ return "<Trajectory frames: %i | atoms: %i | dimensions: %i>" % tuple(np.shape(self.coordinates)) def __len__(self): """ Returns number of frames. """ return len(self.atoms) def __add__(self, traj): """ Trajectory addition for joining the coordinates and elements into a new Trajectory object. Parameters ---------- traj : Trajectory Trajectory object to be added Returns ------- Trajectory Joined Trajectory object. """ new_traj = Trajectory(atoms=np.append(self.atoms, traj.atoms, axis=0), coordinates=np.append(self.coordinates, traj.coordinates, axis=0)) return new_traj def __getitem__(self, i): """ Indexing method. Returns a Molecule object for given index (frame). Returns a Trajectory object if used as slicing. 
""" if isinstance(i, slice): indices = range(len(self))[i.start:i.stop:i.step] if len(indices) == 0: return [] else: new_traj = Trajectory(molecule=self[indices[0]]) for j in indices[1:]: new_traj.append(self[j]) return new_traj else: return Molecule(atoms=self.atoms[i], coordinates=self.coordinates[i]) def __iter__(self): """ Initialize iterator, reset frame index. """ self.current_frame = 0 return self def __next__(self): """ Returns the next frame in Trajectory as a Molecule object. """ if self.current_frame >= len(self): raise StopIteration next_mol = self[self.current_frame] self.current_frame += 1 return next_mol def append(self, mol): """ Append molecule to trajectory. The number of atoms in the molecule must match that of the trajectory. Parameters ---------- mol : Molecule Molecule object to be added Returns ------- None Added to Trajectory object. """ if len(mol.atoms) != self.atoms.shape[1]: raise Exception('Trajectory cannot have different number of atoms per frame') self.atoms = np.append(self.atoms, [mol.atoms], axis=0) self.coordinates = np.append(self.coordinates, [mol.coordinates], axis=0) def read(self, filename): """ Read xyz formatted trajectory file. Parameters ---------- filename : str Trajectory file name. Returns ------- None Assigns 'coordinates', 'atoms', and 'headers' attributes. """ self.name = os.path.splitext(os.path.basename(filename))[0] traj = read_xyz_traj(filename) self.atoms, self.coordinates, self.headers = traj['atoms'], traj['coordinates'], traj['headers'] def write(self, filename): """ Write xyz formatted trajectory file. Parameters ---------- filename : str Trajectory file name (formats: xyz). Returns ------- None Writes molecule information to given file name. 
""" with open(filename, 'w') as traj_file: if hasattr(self, 'headers'): write_xyz_traj(traj_file, self.atoms, self.coordinates, headers=self.headers) else: write_xyz_traj(traj_file, self.atoms, self.coordinates) def get_center(self, mass=True): """ Get coordinates of molecule center at each frame. Parameters ---------- mass : bool Calculate center of mass (True) or geometric center (False). Returns ------- ndarray Molecule center coordinates for each frame. """ centers = np.empty((len(self.atoms), 3)) for f, (frame_atoms, frame_coors) in enumerate(zip(self.atoms, self.coordinates)): centers[f] = get_molecule_center(frame_atoms, frame_coors, mass=mass) return centers
angstrom/trajectory/trajectory.py
5,739
Reading and analyzing trajectories in xyz format. Trajectory addition for joining the coordinates and elements into a new Trajectory object. Parameters ---------- traj : Trajectory Trajectory object to be added Returns ------- Trajectory Joined Trajectory object. Indexing method. Returns a Molecule object for given index (frame). Returns a Trajectory object if used as slicing. Create a trajectory object. Parameters ---------- atoms : list or None List of elements of the molecule for each frame. coordinates : list or None List of atomic positions of the molecule for each frame. read : str or None File name to read molecule file (formats: xyz). molecule : Molecule Create a Trajectory with 1 frame from a Molecule object. Initialize iterator, reset frame index. Returns number of frames. Returns the next frame in Trajectory as a Molecule object. Returns basic trajectory info. Append molecule to trajectory. The number of atoms in the molecule must match that of the trajectory. Parameters ---------- mol : Molecule Molecule object to be added Returns ------- None Added to Trajectory object. Get coordinates of molecule center at each frame. Parameters ---------- mass : bool Calculate center of mass (True) or geometric center (False). Returns ------- ndarray Molecule center coordinates for each frame. Read xyz formatted trajectory file. Parameters ---------- filename : str Trajectory file name. Returns ------- None Assigns 'coordinates', 'atoms', and 'headers' attributes. Write xyz formatted trajectory file. Parameters ---------- filename : str Trajectory file name (formats: xyz). Returns ------- None Writes molecule information to given file name. --- Ångström --- Read, manipulate and analyze molecular trajectory files.
1,807
en
0.47647
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE from __future__ import absolute_import import sys import numpy import awkward as ak def convert_to_array(layout, args, kwargs): out = ak.operations.convert.to_numpy(layout, allow_missing=False) if args == () and kwargs == {}: return out else: return numpy.array(out, *args, **kwargs) implemented = {} def array_function(func, types, args, kwargs): function = implemented.get(func) if function is None: return NotImplemented else: return function(*args, **kwargs) def implements(numpy_function): def decorator(function): implemented[getattr(numpy, numpy_function)] = function return function return decorator def array_ufunc(ufunc, method, inputs, kwargs): if method != "__call__" or len(inputs) == 0 or "out" in kwargs: return NotImplemented behavior = ak._util.behaviorof(*inputs) nextinputs = [] for x in inputs: cast_fcn = ak._util.custom_cast(x, behavior) if cast_fcn is not None: x = cast_fcn(x) nextinputs.append( ak.operations.convert.to_layout(x, allow_record=True, allow_other=True) ) inputs = nextinputs def adjust(custom, inputs, kwargs): args = [ ak._util.wrap(x, behavior) if isinstance(x, (ak.layout.Content, ak.layout.Record)) else x for x in inputs ] out = custom(*args, **kwargs) if not isinstance(out, tuple): out = (out,) return tuple( x.layout if isinstance(x, (ak.highlevel.Array, ak.highlevel.Record)) else x for x in out ) def adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs): nextinputs = [ ak._util.wrap(x, behavior) if isinstance(x, (ak.layout.Content, ak.layout.Record)) else x for x in inputs ] out = apply_ufunc(ufunc, method, nextinputs, kwargs) if out is NotImplemented: return None else: if not isinstance(out, tuple): out = (out,) out = tuple( x.layout if isinstance(x, (ak.highlevel.Array, ak.highlevel.Record)) else x for x in out ) return lambda: out def is_fully_regular(layout): if ( isinstance(layout, ak.layout.RegularArray) and 
layout.parameter("__record__") is None and layout.parameter("__array__") is None ): if isinstance(layout.content, ak.layout.NumpyArray): return True elif isinstance(layout.content, ak.layout.RegularArray): return is_fully_regular(layout.content) else: return False else: return False def deregulate(layout): if not is_fully_regular(layout): return layout else: shape = [len(layout)] node = layout while isinstance(node, ak.layout.RegularArray): shape.append(node.size) node = node.content nparray = ak.nplike.of(node).asarray(node) nparray = nparray.reshape(tuple(shape) + nparray.shape[1:]) return ak.layout.NumpyArray( nparray, node.identities, node.parameters, ) def getfunction(inputs): signature = [ufunc] for x in inputs: if isinstance(x, ak.layout.Content): record = x.parameter("__record__") array = x.parameter("__array__") if record is not None: signature.append(record) elif array is not None: signature.append(array) elif isinstance(x, ak.layout.NumpyArray): signature.append(ak.nplike.of(x).asarray(x).dtype.type) else: signature.append(None) else: signature.append(type(x)) custom = ak._util.overload(behavior, signature) if custom is not None: return lambda: adjust(custom, inputs, kwargs) if ufunc is numpy.matmul: custom_matmul = getfunction_matmul(inputs) if custom_matmul is not None: return custom_matmul inputs = [deregulate(x) for x in inputs] if all( isinstance(x, ak.layout.NumpyArray) or not isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray)) for x in inputs ): nplike = ak.nplike.of(*inputs) result = getattr(ufunc, method)( *[nplike.asarray(x) for x in inputs], **kwargs ) return lambda: (ak.operations.convert.from_numpy(result, highlevel=False),) for x in inputs: if isinstance(x, ak.layout.Content): chained_behavior = ak._util.Behavior(ak.behavior, behavior) apply_ufunc = chained_behavior[numpy.ufunc, x.parameter("__array__")] if apply_ufunc is not None: out = adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs) if out is not None: return 
out apply_ufunc = chained_behavior[numpy.ufunc, x.parameter("__record__")] if apply_ufunc is not None: out = adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs) if out is not None: return out if all( x.parameter("__array__") is not None or x.parameter("__record__") is not None for x in inputs if isinstance(x, ak.layout.Content) ): custom_types = [] for x in inputs: if isinstance(x, ak.layout.Content): if x.parameter("__array__") is not None: custom_types.append(x.parameter("__array__")) elif x.parameter("__record__") is not None: custom_types.append(x.parameter("__record__")) else: custom_types.append(type(x).__name__) else: custom_types.append(type(x).__name__) raise ValueError( "no overloads for custom types: {0}({1})".format( ufunc.__name__, ", ".join(custom_types), ) + ak._util.exception_suffix(__file__) ) return None out = ak._util.broadcast_and_apply( inputs, getfunction, behavior, allow_records=False, pass_depth=False ) assert isinstance(out, tuple) and len(out) == 1 return ak._util.wrap(out[0], behavior) def matmul_for_numba(lefts, rights, dtype): total_outer = 0 total_inner = 0 total_content = 0 for A, B in zip(lefts, rights): first = -1 for Ai in A: if first == -1: first = len(Ai) elif first != len(Ai): raise ValueError( "one of the left matrices in np.matmul is not rectangular" ) if first == -1: first = 0 rowsA = len(A) colsA = first first = -1 for Bi in B: if first == -1: first = len(Bi) elif first != len(Bi): raise ValueError( "one of the right matrices in np.matmul is not rectangular" ) if first == -1: first = 0 rowsB = len(B) colsB = first if colsA != rowsB: raise ValueError( u"one of the pairs of matrices in np.matmul do not match shape: " u"(n \u00d7 k) @ (k \u00d7 m)" ) total_outer += 1 total_inner += rowsA total_content += rowsA * colsB outer = numpy.empty(total_outer + 1, numpy.int64) inner = numpy.empty(total_inner + 1, numpy.int64) content = numpy.zeros(total_content, dtype) outer[0] = 0 inner[0] = 0 outer_i = 1 inner_i = 1 
content_i = 0 for A, B in zip(lefts, rights): rows = len(A) cols = 0 if len(B) > 0: cols = len(B[0]) mids = 0 if len(A) > 0: mids = len(A[0]) for i in range(rows): for j in range(cols): for v in range(mids): pos = content_i + i * cols + j content[pos] += A[i][v] * B[v][j] outer[outer_i] = outer[outer_i - 1] + rows outer_i += 1 for _ in range(rows): inner[inner_i] = inner[inner_i - 1] + cols inner_i += 1 content_i += rows * cols return outer, inner, content matmul_for_numba.numbafied = None def getfunction_matmul(inputs): inputs = [ ak._util.recursively_apply( x, (lambda _: _), pass_depth=False, numpy_to_regular=True ) for x in inputs ] if len(inputs) == 2 and all( isinstance(x, ak._util.listtypes) and isinstance(x.content, ak._util.listtypes) and isinstance(x.content.content, ak.layout.NumpyArray) for x in inputs ): ak._connect._numba.register_and_check() import numba if matmul_for_numba.numbafied is None: matmul_for_numba.numbafied = numba.njit(matmul_for_numba) lefts = ak.highlevel.Array(inputs[0]) rights = ak.highlevel.Array(inputs[1]) dtype = numpy.asarray(lefts[0:0, 0:0, 0:0] + rights[0:0, 0:0, 0:0]).dtype outer, inner, content = matmul_for_numba.numbafied(lefts, rights, dtype) return lambda: ( ak.layout.ListOffsetArray64( ak.layout.Index64(outer), ak.layout.ListOffsetArray64( ak.layout.Index64(inner), ak.layout.NumpyArray(content), ), ), ) else: return None try: NDArrayOperatorsMixin = numpy.lib.mixins.NDArrayOperatorsMixin except AttributeError: from numpy.core import umath as um def _disables_array_ufunc(obj): try: return obj.__array_ufunc__ is None except AttributeError: return False def _binary_method(ufunc, name): def func(self, other): if _disables_array_ufunc(other): return NotImplemented return ufunc(self, other) func.__name__ = "__{}__".format(name) return func def _reflected_binary_method(ufunc, name): def func(self, other): if _disables_array_ufunc(other): return NotImplemented return ufunc(other, self) func.__name__ = "__r{}__".format(name) return 
func def _inplace_binary_method(ufunc, name): def func(self, other): return ufunc(self, other, out=(self,)) func.__name__ = "__i{}__".format(name) return func def _numeric_methods(ufunc, name): return ( _binary_method(ufunc, name), _reflected_binary_method(ufunc, name), _inplace_binary_method(ufunc, name), ) def _unary_method(ufunc, name): def func(self): return ufunc(self) func.__name__ = "__{}__".format(name) return func class NDArrayOperatorsMixin(object): __lt__ = _binary_method(um.less, "lt") __le__ = _binary_method(um.less_equal, "le") __eq__ = _binary_method(um.equal, "eq") __ne__ = _binary_method(um.not_equal, "ne") __gt__ = _binary_method(um.greater, "gt") __ge__ = _binary_method(um.greater_equal, "ge") __add__, __radd__, __iadd__ = _numeric_methods(um.add, "add") __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, "sub") __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, "mul") __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(um.matmul, "matmul") if sys.version_info.major < 3: __div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, "div") __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( um.true_divide, "truediv" ) __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( um.floor_divide, "floordiv" ) __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, "mod") if hasattr(um, "divmod"): __divmod__ = _binary_method(um.divmod, "divmod") __rdivmod__ = _reflected_binary_method(um.divmod, "divmod") __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, "pow") __lshift__, __rlshift__, __ilshift__ = _numeric_methods(um.left_shift, "lshift") __rshift__, __rrshift__, __irshift__ = _numeric_methods( um.right_shift, "rshift" ) __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, "and") __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, "xor") __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, "or") __neg__ = _unary_method(um.negative, "neg") if hasattr(um, "positive"): __pos__ = 
_unary_method(um.positive, "pos") __abs__ = _unary_method(um.absolute, "abs") __invert__ = _unary_method(um.invert, "invert")
src/awkward/_connect/_numpy.py
13,672
BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
85
en
0.5876
# Generated by Django 3.0.4 on 2020-03-26 03:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ViralScreener', '0008_auto_20200326_0338'), ] operations = [ migrations.AlterField( model_name='employeescreeningresponses', name='DateTime', field=models.DateTimeField(), ), ]
mysite/ViralScreener/migrations/0009_auto_20200326_0339.py
405
Generated by Django 3.0.4 on 2020-03-26 03:39
45
en
0.663837
# Crie um programa onde o usuário possa digitar sete valores numéricos # e cadastre-os em uma lista única que mantenha separados os valores pares e ímpares. # No final, mostre os valores pares e ímpares em ordem crescente. lista_unic = [[], []] print('-=' * 20) for c in range(0, 7): nums = int(input(f'Informe um {c+1}° valor: ')) if nums%2 == 0: lista_unic[0].append(nums) else: lista_unic[1].append(nums) print('-=-' * 30) lista_unic[0].sort() lista_unic[1].sort() print(f'Os valores pares foram: {lista_unic[0]}') print(f'Os valores ímpares foram: {lista_unic[1]}') print('-=-' * 30)
CursoemVideo - Python 3/aula 18/ex085.py
623
Crie um programa onde o usuário possa digitar sete valores numéricos e cadastre-os em uma lista única que mantenha separados os valores pares e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.
216
pt
0.961497
# Generated by Django 3.2.4 on 2021-06-05 20:41 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0012_alter_user_first_name_max_length'), ] operations = [ migrations.CreateModel( name='CustomUser', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': 'user', 'verbose_name_plural': 'users', 'abstract': False, }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
accounts/migrations/0001_initial.py
2,879
Generated by Django 3.2.4 on 2021-06-05 20:41
45
en
0.720238
import numpy as np import pytest import xarray as xr import cf_xarray as cfxr from . import requires_shapely @pytest.fixture def geometry_ds(): from shapely.geometry import MultiPoint, Point # empty/fill workaround to avoid numpy deprecation(warning) due to the array interface of shapely geometries. geoms = np.empty(4, dtype=object) geoms[:] = [ MultiPoint([(1.0, 2.0), (2.0, 3.0)]), Point(3.0, 4.0), Point(4.0, 5.0), Point(3.0, 4.0), ] ds = xr.Dataset( { "data": xr.DataArray(range(len(geoms)), dims=("index",)), "time": xr.DataArray([0, 0, 0, 1], dims=("index",)), } ) shp_ds = ds.assign(geometry=xr.DataArray(geoms, dims=("index",))) cf_ds = ds.assign( x=xr.DataArray([1.0, 2.0, 3.0, 4.0, 3.0], dims=("node",), attrs={"axis": "X"}), y=xr.DataArray([2.0, 3.0, 4.0, 5.0, 4.0], dims=("node",), attrs={"axis": "Y"}), node_count=xr.DataArray([2, 1, 1, 1], dims=("index",)), crd_x=xr.DataArray([1.0, 3.0, 4.0, 3.0], dims=("index",), attrs={"nodes": "x"}), crd_y=xr.DataArray([2.0, 4.0, 5.0, 4.0], dims=("index",), attrs={"nodes": "y"}), geometry_container=xr.DataArray( attrs={ "geometry_type": "point", "node_count": "node_count", "node_coordinates": "x y", "coordinates": "crd_x crd_y", } ), ) cf_ds = cf_ds.set_coords(["x", "y", "crd_x", "crd_y"]) return cf_ds, shp_ds @requires_shapely def test_shapely_to_cf(geometry_ds): from shapely.geometry import Point expected, in_ds = geometry_ds out = xr.merge([in_ds.drop_vars("geometry"), cfxr.shapely_to_cf(in_ds.geometry)]) xr.testing.assert_identical(out, expected) out = xr.merge( [ in_ds.drop_vars("geometry").isel(index=slice(1, None)), cfxr.shapely_to_cf(in_ds.geometry.isel(index=slice(1, None))), ] ) expected = expected.isel(index=slice(1, None), node=slice(2, None)).drop_vars( "node_count" ) del expected.geometry_container.attrs["node_count"] xr.testing.assert_identical(out, expected) out = xr.merge( [ in_ds.drop_vars("geometry").isel(index=slice(1, None)), cfxr.shapely_to_cf( in_ds.geometry.isel(index=slice(1, None)), 
grid_mapping="longitude_latitude", ), ] ) np.testing.assert_array_equal(out.lon, expected.crd_x) assert "longitude" in out.cf assert "latitude" in out.cf out = cfxr.shapely_to_cf([Point(2, 3)]) assert set(out.dims) == {"features", "node"} @requires_shapely def test_shapely_to_cf_errors(): from shapely.geometry import LineString, Point geoms = [LineString([[1, 2], [2, 3]]), LineString([[2, 3, 4], [4, 3, 2]])] with pytest.raises(NotImplementedError, match="Only point geometries conversion"): cfxr.shapely_to_cf(geoms) geoms.append(Point(1, 2)) with pytest.raises(ValueError, match="Mixed geometry types are not supported"): cfxr.shapely_to_cf(geoms) with pytest.raises( NotImplementedError, match="Only grid mapping longitude_latitude" ): cfxr.shapely_to_cf([Point(4, 5)], grid_mapping="albers_conical_equal_area") @requires_shapely def test_cf_to_shapely(geometry_ds): in_ds, exp = geometry_ds xr.testing.assert_identical( cfxr.cf_to_shapely(in_ds).drop_vars(["crd_x", "crd_y"]), exp.geometry ) in_ds = in_ds.isel(index=slice(1, None), node=slice(2, None)).drop_vars( "node_count" ) del in_ds.geometry_container.attrs["node_count"] out = cfxr.cf_to_shapely(in_ds) assert out.dims == ("index",) @requires_shapely def test_cf_to_shapely_errors(geometry_ds): in_ds, expected = geometry_ds in_ds.geometry_container.attrs["geometry_type"] = "line" with pytest.raises(NotImplementedError, match="Only point geometries conversion"): cfxr.cf_to_shapely(in_ds) in_ds.geometry_container.attrs["geometry_type"] = "punkt" with pytest.raises(ValueError, match="Valid CF geometry types are "): cfxr.cf_to_shapely(in_ds) @requires_shapely def test_reshape_unique_geometries(geometry_ds): _, in_ds = geometry_ds out = cfxr.geometry.reshape_unique_geometries(in_ds) assert out.geometry.dims == ("features",) assert out.data.dims == ("features", "index") np.testing.assert_array_equal( out.geometry, in_ds.geometry.values[np.array([1, 2, 0])] ) in_ds["index"] = in_ds.time in_ds = 
in_ds.drop_vars("time").rename(index="time") out = cfxr.geometry.reshape_unique_geometries(in_ds) assert out.geometry.dims == ("features",) assert out.data.dims == ("features", "time") np.testing.assert_array_equal(out.time, [0, 1]) geoms = in_ds.geometry.expand_dims(n=[1, 2]) in_ds = in_ds.assign(geometry=geoms) with pytest.raises(ValueError, match="The geometry variable must be 1D"): cfxr.geometry.reshape_unique_geometries(in_ds)
cf_xarray/tests/test_geometry.py
5,104
empty/fill workaround to avoid numpy deprecation(warning) due to the array interface of shapely geometries.
107
en
0.63194
""" list of movies that feed into fresh_tomatoes.py file """ import fresh_tomatoes from get_movie_list import get_movie_list def main(): """ Main entry point for the script. """ # Read in the movies from the json file movie_list = get_movie_list("src/data/movies.json") # Generate the html file and display in a browser window fresh_tomatoes.open_movies_page(movie_list) main()
src/entertainment_center.py
413
Main entry point for the script. list of movies that feed into fresh_tomatoes.py file Read in the movies from the json file Generate the html file and display in a browser window
180
en
0.900583
import pandas as pd import numpy as np import itertools as it from collections import defaultdict from collections import Counter from six.moves import map as imap def dict_subset(d, fields): # return a subset of the provided dict containing only the # fields specified in fields return {k: d[k] for k in d if k in fields and d[k] is not None} class MessageFieldCounter: """ Count occurrences of values in a stream of messages for a specified set of fields Usage: messages = [ {'a': 'apple', 'b': 'boat'}, {'a': 'pear', 'b': 'boat'}, {'a': 'apple', 'b': 'boat'}, ] fields = ['a', 'b'] mfc = MessageFieldCounter(messages, fields) # this class is designed to pass through a long stream of messages # so we have to pull them through in order to count them for msg in mcf: pass print mfc.most_common('a') >>> [('apple', 2)] """ def __init__(self, messages, fields): self.fields = set(fields) self.messages = messages self.counters = defaultdict(Counter) def __iter__(self): return self.process() def process(self): for msg in self.messages: for key in self.fields: value = msg.get(key, None) if value is not None: self.counters[key][value] += 1 yield msg def most_common(self, field, n=1): return self.counters[field].most_common(n) class MessageStats(): """ Extract a set of stats from as stream of messages. 
numeric_fields: list of field names to compute numeric stats (eg min, max, avg) frequency_fields: list of field names to compute frequency of values """ NUMERIC_STATS = ['min', 'max', 'first', 'last', 'count'] FREQUENCY_STATS = ['most_common', 'most_common_count'] def __init__(self, messages, numeric_fields, frequency_fields): self._numeric_fields = numeric_fields self._frequency_fields = frequency_fields self.counter = MessageFieldCounter(messages, frequency_fields) messages = self.counter.process() messages = imap(dict_subset, messages, it.repeat(numeric_fields)) # DataFrame won't take an iterator, but it will take a generator messages = (m for m in messages) self.df = pd.DataFrame(messages) @property def numeric_fields(self): return self._numeric_fields @property def frequency_fields(self): return self._frequency_fields @property def frequency_counter(self): return self.counter @property def data_frame(self): return self.df def numeric_stats(self, field): def first(col): idx = col.first_valid_index() return col[idx] if idx is not None else None def last(col): idx = col.last_valid_index() return col[idx] if idx is not None else None assert field in self.numeric_fields if field in self.df: col = self.df[field] return dict( min=np.nanmin(col), max=np.nanmax(col), first=first(col), last=last(col), count=np.count_nonzero(~np.isnan(col)), ) else: return {} def frequency_stats(self, field): assert field in self.frequency_fields stat = self.frequency_counter.most_common(field) if stat: value, count = stat[0] return dict( most_common=value, most_common_count=count ) else: return {} def field_stats(self, field): stats = {} if field in self.numeric_fields: stats.update(self.numeric_stats(field)) if field in self.frequency_fields: stats.update(self.frequency_stats(field)) return stats
pipe_segment/stats/stats.py
3,984
Count occurrences of values in a stream of messages for a specified set of fields Usage: messages = [ {'a': 'apple', 'b': 'boat'}, {'a': 'pear', 'b': 'boat'}, {'a': 'apple', 'b': 'boat'}, ] fields = ['a', 'b'] mfc = MessageFieldCounter(messages, fields) # this class is designed to pass through a long stream of messages # so we have to pull them through in order to count them for msg in mcf: pass print mfc.most_common('a') >>> [('apple', 2)] Extract a set of stats from as stream of messages. numeric_fields: list of field names to compute numeric stats (eg min, max, avg) frequency_fields: list of field names to compute frequency of values return a subset of the provided dict containing only the fields specified in fields DataFrame won't take an iterator, but it will take a generator
867
en
0.722484
"""[Lambda Expressions] Lambda expressions are simply another way to create functions anonymous functions keyword \ parameter list optional \ \ the : is required, even for zero arguments \ \ / / this expression is evaluated and returned when the lambda function is called. (think of it as "the body" of the function) lambda [parameter list]: expression \ the expression returns a function object that evaluates and returns the expression when it is called Examples from tkinter import Y from unittest import FunctionTestCase lambda x: x**2 lambda x, y: x + y lambda : 'hello' lambda s: s[::-1].upper() type(lambda x: x**2) -> function Note that these expressions are function objects, but are not "named" -> anonymous Functions lambdas, or anonymous functions, are NOT equivalent to closures Assigning a Lambda to a Variable name my_func = lambda x: x**2 type(my_func) -> fuunction my_func(3) -> 9 my_func(4) -> 16 # identical to: def my_func(x): return x**2 type(my_func) -> function """
.history/my_classes/FirstClassFunctions/LambdaExpressions_20210704152007.py
1,178
[Lambda Expressions] Lambda expressions are simply another way to create functions anonymous functions keyword \ parameter list optional \ \ the : is required, even for zero arguments \ \ / / this expression is evaluated and returned when the lambda function is called. (think of it as "the body" of the function) lambda [parameter list]: expression the expression returns a function object that evaluates and returns the expression when it is called Examples from tkinter import Y from unittest import FunctionTestCase lambda x: x**2 lambda x, y: x + y lambda : 'hello' lambda s: s[::-1].upper() type(lambda x: x**2) -> function Note that these expressions are function objects, but are not "named" -> anonymous Functions lambdas, or anonymous functions, are NOT equivalent to closures Assigning a Lambda to a Variable name my_func = lambda x: x**2 type(my_func) -> fuunction my_func(3) -> 9 my_func(4) -> 16 # identical to: def my_func(x): return x**2 type(my_func) -> function
1,167
en
0.584146
""" Distance and Area objects to allow for sensible and convenient calculation and conversions. Here are some tests. """ import unittest from djmodels.contrib.gis.measure import A, Area, D, Distance class DistanceTest(unittest.TestCase): "Testing the Distance object" def testInit(self): "Testing initialization from valid units" d = Distance(m=100) self.assertEqual(d.m, 100) d1, d2, d3 = D(m=100), D(meter=100), D(metre=100) for d in (d1, d2, d3): self.assertEqual(d.m, 100) d = D(nm=100) self.assertEqual(d.m, 185200) y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100) for d in (y1, y2, y3): self.assertEqual(d.yd, 100) mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000) for d in (mm1, mm2): self.assertEqual(d.m, 1.0) self.assertEqual(d.mm, 1000.0) def testInitInvalid(self): "Testing initialization from invalid units" with self.assertRaises(AttributeError): D(banana=100) def testAccess(self): "Testing access in different units" d = D(m=100) self.assertEqual(d.km, 0.1) self.assertAlmostEqual(d.ft, 328.084, 3) def testAccessInvalid(self): "Testing access in invalid units" d = D(m=100) self.assertFalse(hasattr(d, 'banana')) def testAddition(self): "Test addition & subtraction" d1 = D(m=100) d2 = D(m=200) d3 = d1 + d2 self.assertEqual(d3.m, 300) d3 += d1 self.assertEqual(d3.m, 400) d4 = d1 - d2 self.assertEqual(d4.m, -100) d4 -= d1 self.assertEqual(d4.m, -200) with self.assertRaises(TypeError): d1 + 1 with self.assertRaises(TypeError): d1 - 1 with self.assertRaises(TypeError): d1 += 1 with self.assertRaises(TypeError): d1 -= 1 def testMultiplication(self): "Test multiplication & division" d1 = D(m=100) d3 = d1 * 2 self.assertEqual(d3.m, 200) d3 = 2 * d1 self.assertEqual(d3.m, 200) d3 *= 5 self.assertEqual(d3.m, 1000) d4 = d1 / 2 self.assertEqual(d4.m, 50) d4 /= 5 self.assertEqual(d4.m, 10) d5 = d1 / D(m=2) self.assertEqual(d5, 50) a5 = d1 * D(m=10) self.assertIsInstance(a5, Area) self.assertEqual(a5.sq_m, 100 * 10) with self.assertRaises(TypeError): d1 *= D(m=1) with 
self.assertRaises(TypeError): d1 /= D(m=1) def testUnitConversions(self): "Testing default units during maths" d1 = D(m=100) d2 = D(km=1) d3 = d1 + d2 self.assertEqual(d3._default_unit, 'm') d4 = d2 + d1 self.assertEqual(d4._default_unit, 'km') d5 = d1 * 2 self.assertEqual(d5._default_unit, 'm') d6 = d1 / 2 self.assertEqual(d6._default_unit, 'm') def testComparisons(self): "Testing comparisons" d1 = D(m=100) d2 = D(km=1) d3 = D(km=0) self.assertGreater(d2, d1) self.assertEqual(d1, d1) self.assertLess(d1, d2) self.assertFalse(d3) def testUnitsStr(self): "Testing conversion to strings" d1 = D(m=100) d2 = D(km=3.5) self.assertEqual(str(d1), '100.0 m') self.assertEqual(str(d2), '3.5 km') self.assertEqual(repr(d1), 'Distance(m=100.0)') self.assertEqual(repr(d2), 'Distance(km=3.5)') def testUnitAttName(self): "Testing the `unit_attname` class method" unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'), ('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')] for nm, att in unit_tuple: with self.subTest(nm=nm): self.assertEqual(att, D.unit_attname(nm)) class AreaTest(unittest.TestCase): "Testing the Area object" def testInit(self): "Testing initialization from valid units" a = Area(sq_m=100) self.assertEqual(a.sq_m, 100) a = A(sq_m=100) self.assertEqual(a.sq_m, 100) a = A(sq_mi=100) self.assertEqual(a.sq_m, 258998811.0336) def testInitInvaliA(self): "Testing initialization from invalid units" with self.assertRaises(AttributeError): A(banana=100) def testAccess(self): "Testing access in different units" a = A(sq_m=100) self.assertEqual(a.sq_km, 0.0001) self.assertAlmostEqual(a.sq_ft, 1076.391, 3) def testAccessInvaliA(self): "Testing access in invalid units" a = A(sq_m=100) self.assertFalse(hasattr(a, 'banana')) def testAddition(self): "Test addition & subtraction" a1 = A(sq_m=100) a2 = A(sq_m=200) a3 = a1 + a2 self.assertEqual(a3.sq_m, 300) a3 += a1 self.assertEqual(a3.sq_m, 400) a4 = a1 - a2 
self.assertEqual(a4.sq_m, -100) a4 -= a1 self.assertEqual(a4.sq_m, -200) with self.assertRaises(TypeError): a1 + 1 with self.assertRaises(TypeError): a1 - 1 with self.assertRaises(TypeError): a1 += 1 with self.assertRaises(TypeError): a1 -= 1 def testMultiplication(self): "Test multiplication & division" a1 = A(sq_m=100) a3 = a1 * 2 self.assertEqual(a3.sq_m, 200) a3 = 2 * a1 self.assertEqual(a3.sq_m, 200) a3 *= 5 self.assertEqual(a3.sq_m, 1000) a4 = a1 / 2 self.assertEqual(a4.sq_m, 50) a4 /= 5 self.assertEqual(a4.sq_m, 10) with self.assertRaises(TypeError): a1 * A(sq_m=1) with self.assertRaises(TypeError): a1 *= A(sq_m=1) with self.assertRaises(TypeError): a1 / A(sq_m=1) with self.assertRaises(TypeError): a1 /= A(sq_m=1) def testUnitConversions(self): "Testing default units during maths" a1 = A(sq_m=100) a2 = A(sq_km=1) a3 = a1 + a2 self.assertEqual(a3._default_unit, 'sq_m') a4 = a2 + a1 self.assertEqual(a4._default_unit, 'sq_km') a5 = a1 * 2 self.assertEqual(a5._default_unit, 'sq_m') a6 = a1 / 2 self.assertEqual(a6._default_unit, 'sq_m') def testComparisons(self): "Testing comparisons" a1 = A(sq_m=100) a2 = A(sq_km=1) a3 = A(sq_km=0) self.assertGreater(a2, a1) self.assertEqual(a1, a1) self.assertLess(a1, a2) self.assertFalse(a3) def testUnitsStr(self): "Testing conversion to strings" a1 = A(sq_m=100) a2 = A(sq_km=3.5) self.assertEqual(str(a1), '100.0 sq_m') self.assertEqual(str(a2), '3.5 sq_km') self.assertEqual(repr(a1), 'Area(sq_m=100.0)') self.assertEqual(repr(a2), 'Area(sq_km=3.5)') def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(DistanceTest)) s.addTest(unittest.makeSuite(AreaTest)) return s def run(verbosity=2): unittest.TextTestRunner(verbosity=verbosity).run(suite()) if __name__ == "__main__": run()
tests/gis_tests/test_measure.py
7,442
Testing the Area object Testing the Distance object Testing access in different units Testing access in different units Testing access in invalid units Testing access in invalid units Test addition & subtraction Test addition & subtraction Testing comparisons Testing comparisons Testing initialization from valid units Testing initialization from valid units Testing initialization from invalid units Testing initialization from invalid units Test multiplication & division Test multiplication & division Testing the `unit_attname` class method Testing default units during maths Testing default units during maths Testing conversion to strings Testing conversion to strings Distance and Area objects to allow for sensible and convenient calculation and conversions. Here are some tests.
788
en
0.779688
# console converter - USD to BGN # Write a program for converting US dollars (USD) into Bulgarian levs (BGN). # Round the result to 2 digits after the decimal point. Use a fixed exchange rate between the dollar and the lev: 1 USD = 1.79549 BGN. USD = float(input()) BGN = round(USD * 1.79549, 2) print(BGN)
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/01_First-Steps-in-Coding/00.Book-Exercise-2.1-11-USD-to-BGN.py
308
console converter - USD to BGN Write a program for converting US dollars (USD) into Bulgarian levs (BGN). Round the result to 2 digits after the decimal point. Use a fixed exchange rate between the dollar and the lev: 1 USD = 1.79549 BGN.
238
en
0.753672
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING from azure.mgmt.core import ARMPipelineClient from msrest import Deserializer, Serializer if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Optional from azure.core.credentials import TokenCredential from ._configuration import DesktopVirtualizationAPIClientConfiguration from .operations import Operations from .operations import WorkspacesOperations from .operations import ScalingPlansOperations from .operations import ApplicationGroupsOperations from .operations import StartMenuItemsOperations from .operations import ApplicationsOperations from .operations import DesktopsOperations from .operations import HostPoolsOperations from .operations import UserSessionsOperations from .operations import SessionHostsOperations from .operations import MsixPackagesOperations from .operations import MsixImagesOperations from . import models class DesktopVirtualizationAPIClient(object): """DesktopVirtualizationAPIClient. 
:ivar operations: Operations operations :vartype operations: desktop_virtualization_api_client.operations.Operations :ivar workspaces: WorkspacesOperations operations :vartype workspaces: desktop_virtualization_api_client.operations.WorkspacesOperations :ivar scaling_plans: ScalingPlansOperations operations :vartype scaling_plans: desktop_virtualization_api_client.operations.ScalingPlansOperations :ivar application_groups: ApplicationGroupsOperations operations :vartype application_groups: desktop_virtualization_api_client.operations.ApplicationGroupsOperations :ivar start_menu_items: StartMenuItemsOperations operations :vartype start_menu_items: desktop_virtualization_api_client.operations.StartMenuItemsOperations :ivar applications: ApplicationsOperations operations :vartype applications: desktop_virtualization_api_client.operations.ApplicationsOperations :ivar desktops: DesktopsOperations operations :vartype desktops: desktop_virtualization_api_client.operations.DesktopsOperations :ivar host_pools: HostPoolsOperations operations :vartype host_pools: desktop_virtualization_api_client.operations.HostPoolsOperations :ivar user_sessions: UserSessionsOperations operations :vartype user_sessions: desktop_virtualization_api_client.operations.UserSessionsOperations :ivar session_hosts: SessionHostsOperations operations :vartype session_hosts: desktop_virtualization_api_client.operations.SessionHostsOperations :ivar msix_packages: MsixPackagesOperations operations :vartype msix_packages: desktop_virtualization_api_client.operations.MsixPackagesOperations :ivar msix_images: MsixImagesOperations operations :vartype msix_images: desktop_virtualization_api_client.operations.MsixImagesOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. 
:type subscription_id: str :param str base_url: Service URL """ def __init__( self, credential, # type: "TokenCredential" subscription_id, # type: str base_url=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> None if not base_url: base_url = 'https://management.azure.com' self._config = DesktopVirtualizationAPIClientConfiguration(credential, subscription_id, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._serialize.client_side_validation = False self._deserialize = Deserializer(client_models) self.operations = Operations( self._client, self._config, self._serialize, self._deserialize) self.workspaces = WorkspacesOperations( self._client, self._config, self._serialize, self._deserialize) self.scaling_plans = ScalingPlansOperations( self._client, self._config, self._serialize, self._deserialize) self.application_groups = ApplicationGroupsOperations( self._client, self._config, self._serialize, self._deserialize) self.start_menu_items = StartMenuItemsOperations( self._client, self._config, self._serialize, self._deserialize) self.applications = ApplicationsOperations( self._client, self._config, self._serialize, self._deserialize) self.desktops = DesktopsOperations( self._client, self._config, self._serialize, self._deserialize) self.host_pools = HostPoolsOperations( self._client, self._config, self._serialize, self._deserialize) self.user_sessions = UserSessionsOperations( self._client, self._config, self._serialize, self._deserialize) self.session_hosts = SessionHostsOperations( self._client, self._config, self._serialize, self._deserialize) self.msix_packages = MsixPackagesOperations( self._client, self._config, self._serialize, self._deserialize) self.msix_images = MsixImagesOperations( self._client, self._config, self._serialize, self._deserialize) def close(self): # 
type: () -> None self._client.close() def __enter__(self): # type: () -> DesktopVirtualizationAPIClient self._client.__enter__() return self def __exit__(self, *exc_details): # type: (Any) -> None self._client.__exit__(*exc_details)
src/desktopvirtualization/azext_desktopvirtualization/vendored_sdks/desktopvirtualization/_desktop_virtualization_api_client.py
6,184
DesktopVirtualizationAPIClient. :ivar operations: Operations operations :vartype operations: desktop_virtualization_api_client.operations.Operations :ivar workspaces: WorkspacesOperations operations :vartype workspaces: desktop_virtualization_api_client.operations.WorkspacesOperations :ivar scaling_plans: ScalingPlansOperations operations :vartype scaling_plans: desktop_virtualization_api_client.operations.ScalingPlansOperations :ivar application_groups: ApplicationGroupsOperations operations :vartype application_groups: desktop_virtualization_api_client.operations.ApplicationGroupsOperations :ivar start_menu_items: StartMenuItemsOperations operations :vartype start_menu_items: desktop_virtualization_api_client.operations.StartMenuItemsOperations :ivar applications: ApplicationsOperations operations :vartype applications: desktop_virtualization_api_client.operations.ApplicationsOperations :ivar desktops: DesktopsOperations operations :vartype desktops: desktop_virtualization_api_client.operations.DesktopsOperations :ivar host_pools: HostPoolsOperations operations :vartype host_pools: desktop_virtualization_api_client.operations.HostPoolsOperations :ivar user_sessions: UserSessionsOperations operations :vartype user_sessions: desktop_virtualization_api_client.operations.UserSessionsOperations :ivar session_hosts: SessionHostsOperations operations :vartype session_hosts: desktop_virtualization_api_client.operations.SessionHostsOperations :ivar msix_packages: MsixPackagesOperations operations :vartype msix_packages: desktop_virtualization_api_client.operations.MsixPackagesOperations :ivar msix_images: MsixImagesOperations operations :vartype msix_images: desktop_virtualization_api_client.operations.MsixImagesOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The ID of the target subscription. 
:type subscription_id: str :param str base_url: Service URL coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=unused-import,ungrouped-imports type: "TokenCredential" type: str type: Optional[str] type: Any type: (...) -> None type: () -> None type: () -> DesktopVirtualizationAPIClient type: (Any) -> None
2,663
en
0.592768
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Utility for creating release candidates and promoting release candidates to a final relase. Usage: release.py The utility is interactive; you will be prompted for basic release information and guided through the process. This utility assumes you already have local a kafka git folder and that you have added remotes corresponding to both: (i) the github apache kafka mirror and (ii) the apache kafka git repo. 
""" from __future__ import print_function import datetime from getpass import getpass import json import os import subprocess import sys import tempfile PROJECT_NAME = "kafka" CAPITALIZED_PROJECT_NAME = "kafka".upper() SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) # Location of the local git repository REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, SCRIPT_DIR) # Remote name, which points to Github by default PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache-github") PREFS_FILE = os.path.join(SCRIPT_DIR, '.release-settings.json') delete_gitrefs = False work_dir = None def fail(msg): if work_dir: cmd("Cleaning up work directory", "rm -rf %s" % work_dir) if delete_gitrefs: try: cmd("Resetting repository working state to branch %s" % starting_branch, "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True) cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True) cmd("Deleting git tag %s" %rc_tag , "git tag -d %s" % rc_tag, shell=True) except subprocess.CalledProcessError: print("Failed when trying to clean up git references added by this script. 
You may need to clean up branches/tags yourself before retrying.") print("Expected git branch: " + release_version) print("Expected git tag: " + rc_tag) print(msg) sys.exit(1) def print_output(output): if output is None or len(output) == 0: return for line in output.split('\n'): print(">", line) def cmd(action, cmd, *args, **kwargs): if isinstance(cmd, basestring) and not kwargs.get("shell", False): cmd = cmd.split() allow_failure = kwargs.pop("allow_failure", False) stdin_log = "" if "stdin" in kwargs and isinstance(kwargs["stdin"], basestring): stdin_log = "--> " + kwargs["stdin"] stdin = tempfile.TemporaryFile() stdin.write(kwargs["stdin"]) stdin.seek(0) kwargs["stdin"] = stdin print(action, cmd, stdin_log) try: output = subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs) print_output(output) except subprocess.CalledProcessError as e: print_output(e.output) if allow_failure: return print("*************************************************") print("*** First command failure occurred here. ***") print("*** Will now try to clean up working state. ***") print("*************************************************") fail("") def cmd_output(cmd, *args, **kwargs): if isinstance(cmd, basestring): cmd = cmd.split() return subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs) def replace(path, pattern, replacement): updated = [] with open(path, 'r') as f: for line in f: updated.append((replacement + '\n') if line.startswith(pattern) else line) with open(path, 'w') as f: for line in updated: f.write(line) def user_ok(msg): ok = raw_input(msg) return ok.lower() == 'y' def sftp_mkdir(dir): basedir, dirname = os.path.split(dir) if not basedir: basedir = "." 
try: cmd_str = """ cd %s -mkdir %s """ % (basedir, dirname) cmd("Creating '%s' in '%s' in your Apache home directory if it does not exist (errors are ok if the directory already exists)" % (dirname, basedir), "sftp -b - %s@home.apache.org" % apache_id, stdin=cmd_str, allow_failure=True) except subprocess.CalledProcessError: # This is ok. The command fails if the directory already exists pass def get_pref(prefs, name, request_fn): "Get a preference from existing preference dictionary or invoke a function that can collect it from the user" val = prefs.get(name) if not val: val = request_fn() prefs[name] = val return val # Load saved preferences prefs = {} if os.path.exists(PREFS_FILE): with open(PREFS_FILE, 'r') as prefs_fp: prefs = json.load(prefs_fp) if not user_ok("""Requirements: 1. Updated docs to reference the new release version where appropriate. 2. JDK7 and JDK8 compilers and libraries 3. Your Apache ID, already configured with SSH keys on id.apache.org and SSH keys available in this shell session 4. All issues in the target release resolved with valid resolutions (if not, this script will report the problematic JIRAs) 5. A GPG key used for signing the release. This key should have been added to public Apache servers and the KEYS file on the Kafka site 6. Standard toolset installed -- git, gpg, gradle, sftp, etc. 7. ~/.gradle/gradle.properties configured with the signing properties described in the release process wiki, i.e. mavenUrl=https://repository.apache.org/service/local/staging/deploy/maven2 mavenUsername=your-apache-id mavenPassword=your-apache-passwd signing.keyId=your-gpgkeyId signing.password=your-gpg-passphrase signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist anymore, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg") 8. 
~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e., <server> <id>apache.releases.https</id> <username>your-apache-id</username> <password>your-apache-passwd</password> </server> <server> <id>your-gpgkeyId</id> <passphrase>your-gpg-passphase</passphrase> </server> <profile> <id>gpg-signing</id> <properties> <gpg.keyname>your-gpgkeyId</gpg.keyname> <gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId> </properties> </profile> 9. You may also need to update some gnupgp configs: ~/.gnupg/gpg-agent.conf allow-loopback-pinentry ~/.gnupg/gpg.conf use-agent pinentry-mode loopback echo RELOADAGENT | gpg-connect-agent If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up. Some of these may be used from these previous settings loaded from %s: %s Do you have all of of these setup? (y/n): """ % (PREFS_FILE, json.dumps(prefs, indent=2))): fail("Please try again once you have all the prerequisites ready.") starting_branch = cmd_output('git rev-parse --abbrev-ref HEAD') cmd("Verifying that you have no unstaged git changes", 'git diff --exit-code --quiet') cmd("Verifying that you have no staged git changes", 'git diff --cached --exit-code --quiet') release_version = raw_input("Release version (without any RC info, e.g. 
0.10.2.0): ") try: release_version_parts = release_version.split('.') if len(release_version_parts) != 4: fail("Invalid release version, should have 4 version number components") # Validate each part is a number [int(x) for x in release_version_parts] except ValueError: fail("Invalid release version, should be a dotted version number") rc = raw_input("Release candidate number: ") dev_branch = '.'.join(release_version_parts[:3]) docs_version = ''.join(release_version_parts[:3]) # Validate that the release doesn't already exist and that the cmd("Fetching tags from upstream", 'git fetch --tags %s' % PUSH_REMOTE_NAME) tags = cmd_output('git tag').split() if release_version in tags: fail("The specified version has already been tagged and released.") # TODO promotion if not rc: fail("Automatic Promotion is not yet supported.") # Find the latest RC and make sure they want to promote that one rc_tag = sorted([t for t in tags if t.startswith(release_version + '-rc')])[-1] if not user_ok("Found %s as latest RC for this release. Is this correct? 
(y/n): "): fail("This script couldn't determine which RC tag to promote, you'll need to fix up the RC tags and re-run the script.") sys.exit(0) # Prereq checks apache_id = get_pref(prefs, 'apache_id', lambda: raw_input("Enter your apache username: ")) jdk7_java_home = get_pref(prefs, 'jdk7', lambda: raw_input("Enter the path for JAVA_HOME for a JDK7 compiler (blank to use default JAVA_HOME): ")) jdk7_env = dict(os.environ) if jdk7_java_home.strip() else None if jdk7_env is not None: jdk7_env['JAVA_HOME'] = jdk7_java_home if "1.7.0" not in cmd_output("java -version", env=jdk7_env): fail("You must be able to build artifacts with JDK7 for Scala 2.10 and 2.11 artifacts") jdk8_java_home = get_pref(prefs, 'jdk8', lambda: raw_input("Enter the path for JAVA_HOME for a JDK8 compiler (blank to use default JAVA_HOME): ")) jdk8_env = dict(os.environ) if jdk8_java_home.strip() else None if jdk8_env is not None: jdk8_env['JAVA_HOME'] = jdk8_java_home if "1.8.0" not in cmd_output("java -version", env=jdk8_env): fail("You must be able to build artifacts with JDK8 for Scala 2.12 artifacts") def select_gpg_key(): print("Here are the available GPG keys:") available_keys = cmd_output("gpg --list-secret-keys") print(available_keys) key_name = raw_input("Which user name (enter the user name without email address): ") if key_name not in available_keys: fail("Couldn't find the requested key.") return key_name key_name = get_pref(prefs, 'gpg-key', select_gpg_key) gpg_passphrase = get_pref(prefs, 'gpg-pass', lambda: getpass("Passphrase for this GPG key: ")) # Do a quick validation so we can fail fast if the password is incorrect with tempfile.NamedTemporaryFile() as gpg_test_tempfile: gpg_test_tempfile.write("abcdefg") cmd("Testing GPG key & passphrase", ["gpg", "--batch", "--pinentry-mode", "loopback", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", gpg_test_tempfile.name + ".asc", "--detach-sig", gpg_test_tempfile.name], stdin=gpg_passphrase) # Save preferences 
print("Saving preferences to %s" % PREFS_FILE) with open(PREFS_FILE, 'w') as prefs_fp: prefs = json.dump(prefs, prefs_fp) # Generate RC try: int(rc) except ValueError: fail("Invalid release candidate number: %s" % rc) rc_tag = release_version + '-rc' + rc delete_gitrefs = True # Since we are about to start creating new git refs, enable cleanup function on failure to try to delete them cmd("Checking out current development branch", "git checkout -b %s %s" % (release_version, PUSH_REMOTE_NAME + "/" + dev_branch)) print("Updating version numbers") replace("gradle.properties", "version", "version=%s" % release_version) replace("tests/kafkatest/__init__.py", "__version__", "__version__ = '%s'" % release_version) cmd("update streams quickstart pom", ["sed", "-i", ".orig"," s/-SNAPSHOT//", "streams/quickstart/pom.xml"]) cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/pom.xml"]) cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/src/main/resources/archetype-resources/pom.xml"]) cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig") cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig") cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig") # Command in explicit list due to messages with spaces cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version]) # Command in explicit list due to messages with spaces cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag]) rc_githash = cmd_output("git show-ref --hash " + rc_tag) cmd("Switching back to your starting branch", "git checkout %s" % starting_branch) # Note that we don't use tempfile here because mkdtemp causes problems with sftp and being able to determine the absolute path to a file. 
# Instead we rely on a fixed path and if it work_dir = os.path.join(REPO_HOME, ".release_work_dir") if os.path.exists(work_dir): fail("A previous attempt at a release left dirty state in the work directory. Clean up %s before proceeding. (This attempt will try to cleanup, simply retrying may be sufficient now...)" % work_dir) os.makedirs(work_dir) print("Temporary build working director:", work_dir) kafka_dir = os.path.join(work_dir, 'kafka') streams_quickstart_dir = os.path.join(kafka_dir, 'streams/quickstart') print("Streams quickstart dir", streams_quickstart_dir) cmd("Creating staging area for release artifacts", "mkdir kafka-" + rc_tag, cwd=work_dir) artifacts_dir = os.path.join(work_dir, "kafka-" + rc_tag) cmd("Cloning clean copy of repo", "git clone %s kafka" % REPO_HOME, cwd=work_dir) cmd("Checking out RC tag", "git checkout -b %s %s" % (release_version, rc_tag), cwd=kafka_dir) current_year = datetime.datetime.now().year cmd("Verifying the correct year in NOTICE", "grep %s NOTICE" % current_year, cwd=kafka_dir) with open(os.path.join(artifacts_dir, "RELEASE_NOTES.html"), 'w') as f: print("Generating release notes") try: subprocess.check_call(["./release_notes.py", release_version], stdout=f) except subprocess.CalledProcessError as e: print_output(e.output) print("*************************************************") print("*** First command failure occurred here. ***") print("*** Will now try to clean up working state. 
***") print("*************************************************") fail("") params = { 'release_version': release_version, 'rc_tag': rc_tag, 'artifacts_dir': artifacts_dir } cmd("Creating source archive", "git archive --format tar.gz --prefix kafka-%(release_version)s-src/ -o %(artifacts_dir)s/kafka-%(release_version)s-src.tgz %(rc_tag)s" % params) cmd("Building artifacts", "gradle", cwd=kafka_dir, env=jdk7_env) cmd("Building artifacts", "./gradlew clean releaseTarGzAll aggregatedJavadoc", cwd=kafka_dir, env=jdk7_env) # This should be removed when Java7 is dropped (cf. KAFKA-4421) cmd("Building artifacts for Scala 2.12", "./gradlew releaseTarGz -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env) cmd("Copying artifacts", "cp %s/core/build/distributions/* %s" % (kafka_dir, artifacts_dir), shell=True) cmd("Copying artifacts", "cp -R %s/build/docs/javadoc %s" % (kafka_dir, artifacts_dir)) for filename in os.listdir(artifacts_dir): full_path = os.path.join(artifacts_dir, filename) if not os.path.isfile(full_path): continue # Commands in explicit list due to key_name possibly containing spaces cmd("Signing " + full_path, ["gpg", "--batch", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", full_path + ".asc", "--detach-sig", full_path], stdin=gpg_passphrase) cmd("Verifying " + full_path, ["gpg", "--verify", full_path + ".asc", full_path]) # Note that for verification, we need to make sure only the filename is used with --print-md because the command line # argument for the file is included in the output and verification uses a simple diff that will break if an absolut path # is used. 
dir, fname = os.path.split(full_path) cmd("Generating MD5 for " + full_path, "gpg --print-md md5 %s > %s.md5" % (fname, fname), shell=True, cwd=dir) cmd("Generating SHA1 for " + full_path, "gpg --print-md sha1 %s > %s.sha1" % (fname, fname), shell=True, cwd=dir) cmd("Generating SHA512 for " + full_path, "gpg --print-md sha512 %s > %s.sha512" % (fname, fname), shell=True, cwd=dir) cmd("Listing artifacts to be uploaded:", "ls -R %s" % artifacts_dir) if not user_ok("Going to upload the artifacts in %s, listed above, to your Apache home directory. Ok (y/n)?): " % artifacts_dir): fail("Quitting") sftp_mkdir("public_html") kafka_output_dir = "kafka-" + rc_tag sftp_mkdir(os.path.join("public_html", kafka_output_dir)) public_release_dir = os.path.join("public_html", kafka_output_dir) # The sftp -r option doesn't seem to work as would be expected, at least with the version shipping on OS X. To work around this we process all the files and directories manually... sftp_cmds = "" for root, dirs, files in os.walk(artifacts_dir): assert root.startswith(artifacts_dir) for dir in dirs: sftp_mkdir(os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], dir)) for file in files: local_path = os.path.join(root, file) remote_path = os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], file) sftp_cmds = """ put %s %s """ % (local_path, remote_path) cmd("Uploading artifacts in %s to your Apache home directory" % root, "sftp -b - %s@home.apache.org" % apache_id, stdin=sftp_cmds) with open(os.path.expanduser("~/.gradle/gradle.properties")) as f: contents = f.read() if not user_ok("Going to build and upload mvn artifacts based on these settings:\n" + contents + '\nOK (y/n)?: '): fail("Retry again later") cmd("Building and uploading archives", "./gradlew uploadArchivesAll", cwd=kafka_dir, env=jdk7_env) cmd("Building and uploading archives", "./gradlew uploadCoreArchives_2_12 -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env) cmd("Building and uploading 
archives", "mvn deploy -Pgpg-signing", cwd=streams_quickstart_dir, env=jdk7_env) release_notification_props = { 'release_version': release_version, 'rc': rc, 'rc_tag': rc_tag, 'rc_githash': rc_githash, 'dev_branch': dev_branch, 'docs_version': docs_version, 'apache_id': apache_id, } # TODO: Many of these suggested validation steps could be automated and would help pre-validate a lot of the stuff voters test print(""" ******************************************************************************************************************************************************* Ok. We've built and staged everything for the %(rc_tag)s. Now you should sanity check it before proceeding. All subsequent steps start making RC data public. Some suggested steps: * Grab the source archive and make sure it compiles: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz * Grab one of the binary distros and run the quickstarts against them: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s.tgz * Extract and verify one of the site docs jars: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s-site-docs.tgz * Build a sample against jars in the staging repo: (TODO: Can we get a temporary URL before "closing" the staged artifacts?) 
* Validate GPG signatures on at least one file: wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz && wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.asc && wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.md5 && wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha1 && wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha512 && gpg --verify kafka-%(release_version)s-src.tgz.asc kafka-%(release_version)s-src.tgz && gpg --print-md md5 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.md5 && gpg --print-md sha1 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha1 && gpg --print-md sha512 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha512 && rm kafka-%(release_version)s-src.tgz* && echo "OK" || echo "Failed" * Validate the javadocs look ok. They are at http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/ ******************************************************************************************************************************************************* """ % release_notification_props) if not user_ok("Have you sufficiently verified the release artifacts (y/n)?: "): fail("Ok, giving up") print("Next, we need to get the Maven artifacts we published into the staging repository.") # TODO: Can we get this closed via a REST API since we already need to collect credentials for this repo? 
print("Go to https://repository.apache.org/#stagingRepositories and hit 'Close' for the new repository that was created by uploading artifacts.") if not user_ok("Have you successfully deployed the artifacts (y/n)?: "): fail("Ok, giving up") if not user_ok("Ok to push RC tag %s (y/n)?: " % rc_tag): fail("Ok, giving up") cmd("Pushing RC tag", "git push %s %s" % (PUSH_REMOTE_NAME, rc_tag)) # Move back to starting branch and clean out the temporary release branch (e.g. 0.10.2.0) we used to generate everything cmd("Resetting repository working state", "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True) cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True) email_contents = """ To: dev@kafka.apache.org, users@kafka.apache.org, kafka-clients@googlegroups.com Subject: [VOTE] %(release_version)s RC%(rc)s Hello Kafka users, developers and client-developers, This is the first candidate for release of Apache Kafka %(release_version)s. <DESCRIPTION OF MAJOR CHANGES, INCLUDE INDICATION OF MAJOR/MINOR RELEASE> Release notes for the %(release_version)s release: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/RELEASE_NOTES.html *** Please download, test and vote by <VOTING DEADLINE, e.g. 
Monday, March 28, 9am PT> Kafka's KEYS file containing PGP keys we use to sign the release: http://kafka.apache.org/KEYS * Release artifacts to be voted upon (source and binary): http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/ * Maven artifacts to be voted upon: https://repository.apache.org/content/groups/staging/ * Javadoc: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/ * Tag to be voted upon (off %(dev_branch)s branch) is the %(release_version)s tag: https://github.com/apache/kafka/releases/tag/%(rc_tag)s * Documentation: http://kafka.apache.org/%(docs_version)s/documentation.html * Protocol: http://kafka.apache.org/%(docs_version)s/protocol.html * Successful Jenkins builds for the %(dev_branch)s branch: Unit/integration tests: https://builds.apache.org/job/kafka-%(dev_branch)s-jdk7/<BUILD NUMBER>/ System tests: https://jenkins.confluent.io/job/system-test-kafka/job/%(dev_branch)s/<BUILD_NUMBER>/ /************************************** Thanks, <YOU> """ % release_notification_props print() print() print("*****************************************************************") print() print(email_contents) print() print("*****************************************************************") print() print("All artifacts should now be fully staged. Use the above template to send the announcement for the RC to the mailing list.") print("IMPORTANT: Note that there are still some substitutions that need to be made in the template:") print(" - Describe major changes in this release") print(" - Deadline for voting, which should be at least 3 days after you send out the email") print(" - Jenkins build numbers for successful unit & system test builds") print(" - Fill in your name in the signature") print(" - Finally, validate all the links before shipping!") print("Note that all substitutions are annotated with <> around them.")
release.py
24,937
Get a preference from existing preference dictionary or invoke a function that can collect it from the user Utility for creating release candidates and promoting release candidates to a final relase. Usage: release.py The utility is interactive; you will be prompted for basic release information and guided through the process. This utility assumes you already have local a kafka git folder and that you have added remotes corresponding to both: (i) the github apache kafka mirror and (ii) the apache kafka git repo. !/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Location of the local git repository Remote name, which points to Github by default This is ok. 
The command fails if the directory already exists Load saved preferences Validate each part is a number Validate that the release doesn't already exist and that the TODO promotion Find the latest RC and make sure they want to promote that one Prereq checks Do a quick validation so we can fail fast if the password is incorrect Save preferences Generate RC Since we are about to start creating new git refs, enable cleanup function on failure to try to delete them Command in explicit list due to messages with spaces Command in explicit list due to messages with spaces Note that we don't use tempfile here because mkdtemp causes problems with sftp and being able to determine the absolute path to a file. Instead we rely on a fixed path and if it This should be removed when Java7 is dropped (cf. KAFKA-4421) Commands in explicit list due to key_name possibly containing spaces Note that for verification, we need to make sure only the filename is used with --print-md because the command line argument for the file is included in the output and verification uses a simple diff that will break if an absolut path is used. The sftp -r option doesn't seem to work as would be expected, at least with the version shipping on OS X. To work around this we process all the files and directories manually... TODO: Many of these suggested validation steps could be automated and would help pre-validate a lot of the stuff voters test TODO: Can we get this closed via a REST API since we already need to collect credentials for this repo? Move back to starting branch and clean out the temporary release branch (e.g. 0.10.2.0) we used to generate everything
3,041
en
0.901758
from masonite.foundation import response_handler
from masonite.storage import StorageCapsule
from masonite.auth import Sign
from masonite.environment import LoadEnvironment
from masonite.utils.structures import load
from masonite.utils.location import base_path
from masonite.middleware import (
    SessionMiddleware,
    EncryptCookies,
    LoadUserMiddleware,
    MaintenanceModeMiddleware,
)
from masonite.routes import Route
from masonite.configuration.Configuration import Configuration
from masonite.configuration import config

from config.filesystem import STATICFILES
from app.middlewares.VerifyCsrfToken import VerifyCsrfToken


class Kernel:
    """Application HTTP kernel.

    Wires the Masonite application container: environment, configuration,
    middleware stack, routes, database, templates and static storage.
    `register()` is the single entry point; the framework calls it at boot.
    """

    # Middleware run on every HTTP request, in list order.
    http_middleware = [MaintenanceModeMiddleware, EncryptCookies]

    # Named middleware groups that routes can opt into.
    route_middleware = {
        "web": [SessionMiddleware, LoadUserMiddleware, VerifyCsrfToken],
    }

    def __init__(self, app):
        # `app` is the Masonite application/IoC container.
        self.application = app

    def register(self):
        """Register all application bindings, in dependency order."""
        # Register routes
        self.load_environment()
        self.register_configurations()
        self.register_middleware()
        self.register_routes()
        self.register_database()
        self.register_templates()
        self.register_storage()

    def load_environment(self):
        """Load variables from the .env file into the process environment."""
        LoadEnvironment()

    def register_configurations(self):
        """Load config files and bind app-wide locations into the container."""
        # load configuration
        # NOTE: "config.location" must be bound before Configuration.load(),
        # which reads config modules from that directory.
        self.application.bind("config.location", "config")
        configuration = Configuration(self.application)
        configuration.load()
        self.application.bind("config", configuration)
        key = config("application.key")
        self.application.bind("key", key)
        # Signing/encryption helper derived from the application key.
        self.application.bind("sign", Sign(key))
        # set locations
        self.application.bind("resources.location", "resources/")
        self.application.bind("controllers.location", "app/controllers")
        self.application.bind("jobs.location", "app/jobs")
        self.application.bind("providers.location", "app/providers")
        self.application.bind("mailables.location", "app/mailables")
        self.application.bind("listeners.location", "app/listeners")
        self.application.bind("validation.location", "app/validation")
        self.application.bind("notifications.location", "app/notifications")
        self.application.bind("events.location", "app/events")
        self.application.bind("tasks.location", "app/tasks")
        self.application.bind("models.location", "app/models")
        self.application.bind("observers.location", "app/models/observers")
        self.application.bind("policies.location", "app/policies")
        self.application.bind("commands.location", "app/commands")
        self.application.bind("middlewares.location", "app/middlewares")
        # Dotted path to the callable used by `python craft serve`.
        self.application.bind("server.runner", "masonite.commands.ServeCommand.main")

    def register_middleware(self):
        """Attach the route and global middleware to the middleware capsule."""
        self.application.make("middleware").add(self.route_middleware).add(self.http_middleware)

    def register_routes(self):
        """Load the route module and register its ROUTES under the 'web' group."""
        Route.set_controller_locations(self.application.make("controllers.location"))
        self.application.bind("routes.location", "routes/web")
        self.application.make("router").add(
            Route.group(
                load(self.application.make("routes.location"), "ROUTES"),
                middleware=["web"]
            )
        )

    def register_database(self):
        """Bind the ORM query builder and migration/seed locations."""
        from masoniteorm.query import QueryBuilder

        self.application.bind(
            "builder",
            QueryBuilder(connection_details=config("database.databases")),
        )

        self.application.bind("migrations.location", "databases/migrations")
        self.application.bind("seeds.location", "databases/seeds")

        self.application.bind("resolver", config("database.db"))

    def register_templates(self):
        """Bind the directory that view templates are rendered from."""
        self.application.bind("views.location", "templates/")

    def register_storage(self):
        """Configure static-asset storage and the response handler."""
        storage = StorageCapsule()
        storage.add_storage_assets(STATICFILES)
        self.application.bind("storage_capsule", storage)

        self.application.set_response_handler(response_handler)
        self.application.use_storage_path(base_path("storage"))
Kernel.py
4,103
Register routes load configuration set locations
48
en
0.709209
import html
import os
import pathlib
import shutil
import sqlite3
import sys

from collections import OrderedDict

from scripts.html_parts import *
from scripts.ilapfuncs import logfunc
from scripts.version_info import aleapp_version, aleapp_contributors

# Icon returned when neither the category nor the artifact has a mapping.
_DEFAULT_ICON = 'alert-triangle'

# Categories whose icon does not depend on the artifact.
## Please keep list below SORTED by category
_CATEGORY_ICONS = {
    'ADDRESS BOOK': 'book-open',
    'AGGREGATE DICTIONARY': 'book',
    'AIRTAGS': 'map-pin',
    'ALARMS': 'clock',
    'APP CONDUIT': 'activity',
    'APP PERMISSIONS': 'key',
    'APP UPDATES': 'codepen',
    'APPLE PODCASTS': 'play-circle',
    'APPLICATIONS': 'grid',
    'BLUETOOTH': 'bluetooth',
    'CALENDAR': 'calendar',
    'CALL HISTORY': 'phone-call',
    'CARPLAY': 'package',
    'CASH APP': 'credit-card',
    'CELLULAR WIRELESS': 'bar-chart',
    'CONNECTED TO': 'zap',
    'DATA USAGE': 'wifi',
    'DHCP': 'settings',
    'FACEBOOK MESSENGER': 'facebook',
    'FILES APP': 'file-text',
    'HEALTH DATA': 'heart',
    'ICLOUD QUICK LOOK': 'file',
    'ICLOUD RETURNS': 'cloud',
    'ICLOUD SHARED ALBUMS': 'cloud',
    'INSTALLED APPS': 'package',
    'IOS BUILD': 'git-commit',
    'IOS MAIL': 'mail',
    'IOS SCREENS': 'maximize',
    'LOCATION SERVICES CONFIGURATIONS': 'settings',
    'MEDIA LIBRARY': 'play-circle',
    'MEDIA METADATA': 'file-plus',
    'MEDICAL ID': 'thermometer',
    'MOBILE ACTIVATION LOGS': 'clipboard',
    'MOBILE BACKUP': 'save',
    'MOBILE CONTAINER MANAGER': 'save',
    'MOBILE INSTALLATION LOGS': 'clipboard',
    'MOBILE SOFTWARE UPDATE': 'refresh-cw',
    'NOTES': 'file-text',
    'NOTIFICATIONS': 'bell',
    'PHOTOS': 'image',
    'POWERLOG': 'power',
    'POWERLOG BACKUPS': 'power',
    'PROTON MAIL': 'mail',
    'RECENT ACTIVITY': 'activity',
    'REMINDERS': 'list',
    'ROUTINED': 'map',
    'SAFARI BROWSER': 'compass',
    'SCREENTIME': 'monitor',
    'SCRIPT LOGS': 'archive',
    'SMS & IMESSAGE': 'message-square',
    'SQLITE JOURNALING': 'book-open',
    'TEXT INPUT MESSAGES': 'message-square',
    'USER DICTIONARY': 'book',
    'VENMO': 'dollar-sign',
    'VOICE-RECORDINGS': 'mic',
    'VOICE-TRIGGERS': 'mic',
    'WIFI CONNECTIONS': 'wifi',
    'WIFI KNOWN NETWORKS': 'wifi',
}

# Categories whose icon depends on the artifact:
# category -> (artifact -> icon, fallback icon for unknown artifacts).
_ARTIFACT_ICONS = {
    'APPLE WALLET': ({'TRANSACTIONS': 'dollar-sign', 'CARDS': 'credit-card',
                      'PASSES': 'send'}, _DEFAULT_ICON),
    'CLOUDKIT': ({'PARTICIPANTS': 'user', 'NOTE SHARING': 'share-2'}, _DEFAULT_ICON),
    'COREDUET': ({'AIRPLANE MODE': 'pause', 'LOCK STATE': 'lock',
                  'PLUGGED IN': 'battery-charging'}, _DEFAULT_ICON),
    'DEVICE INFO': ({'BUILD INFO': 'terminal', 'IOS SYSTEM VERSION': 'git-commit',
                     'PARTNER SETTINGS': 'settings'}, 'info'),
    'DISCORD': ({'DISCORD MESSAGES': 'message-square', 'DISCORD ACCOUNT': 'user',
                 'DISCORD MANIFEST': 'file-text'}, _DEFAULT_ICON),
    'GEOLOCATION': ({'APPLICATIONS': 'grid', 'MAP TILE CACHE': 'map',
                     'PD PLACE CACHE': 'map-pin'}, _DEFAULT_ICON),
    'GOOGLE DUO': ({'GOOGLE DUO - CALL HISTORY': 'phone-call',
                    'GOOGLE DUO - CONTACTS': 'user',
                    'GOOGLE DUO - CLIPS': 'video'}, _DEFAULT_ICON),
    'IMO HD CHAT': ({'IMO HD CHAT - MESSAGES': 'message-circle',
                     'IMO HD CHAT - CONTACTS': 'user'}, _DEFAULT_ICON),
    'INSTAGRAM': ({'INSTAGRAM THREADS': 'message-square',
                   'INSTAGRAM THREADS CALLS': 'phone'}, _DEFAULT_ICON),
    'INTERACTIONC': ({'CONTACTS': 'user', 'ATTACHMENTS': 'paperclip'}, _DEFAULT_ICON),
    'KEYBOARD': ({'KEYBOARD DYNAMIC LEXICON': 'type',
                  'KEYBOARD APPLICATION USAGE': 'type'}, _DEFAULT_ICON),
    'KIK': ({'KIK MESSAGES': 'message-square', 'KIK USERS': 'user',
             'KIK MEDIA METADATA': 'file-plus',
             'KIK PENDING UPLOADS': 'upload'}, _DEFAULT_ICON),
    'KNOWLEDGEC': ({'KNOWLEDGEC DEVICE LOCKED': 'lock',
                    'KNOWLEDGEC PLUGGED IN': 'battery-charging',
                    'KNOWLEDGEC BATTERY LEVEL': 'battery'}, 'activity'),
    'LOCATIONS': ({'APPLE MAPS SEARCH HISTORY': 'search'}, 'map-pin'),
    'MICROSOFT TEAMS': ({'TEAMS MESSAGES': 'message-square', 'TEAMS CONTACT': 'users',
                         'TEAMS USER': 'user', 'TEAMS CALL LOGS': 'phone',
                         'TEAMS SHARED LOCATIONS': 'map-pin'}, _DEFAULT_ICON),
    'MICROSOFT TEAMS - LOGS': ({'TEAMS LOCATIONS': 'map-pin', 'TEAMS MOTION': 'move',
                                'TEAMS STATE CHANGE': 'truck',
                                'TEAMS POWER LOG': 'battery-charging',
                                'TEAMS TIMEZONE': 'clock'}, _DEFAULT_ICON),
    'SLACK': ({'SLACK MESSAGES': 'message-square', 'SLACK USER DATA': 'user',
               'SLACK ATTACHMENTS': 'paperclip', 'SLACK WORKSPACE DATA': 'slack',
               'SLACK TEAM DATA': 'slack', 'SLACK CHANNEL DATA': 'slack'}, _DEFAULT_ICON),
    'TIKTOK': ({'TIKTOK MESSAGES': 'message-square',
                'TIKTOK CONTACTS': 'user'}, _DEFAULT_ICON),
    'VIBER': ({'VIBER - SETTINGS': 'settings', 'VIBER - CONTACTS': 'users',
               'VIBER - CHATS': 'message-square',
               'VIBER - CALL REMNANTS': 'phone-call'}, _DEFAULT_ICON),
    'WHATSAPP': ({'WHATSAPP - MESSAGES': 'message-square',
                  'WHATSAPP - CONTACTS': 'users'}, _DEFAULT_ICON),
}


def get_icon_name(category, artifact):
    '''
    Returns the icon name from the feathericons collection. To add an icon type for
    an artifact, add it to one of the lookup tables above, using the types listed
    @ feathericons.com. If no icon is available, the alert triangle is returned as
    default icon.
    '''
    category = category.upper()
    artifact = artifact.upper()

    # Any category containing 'ACCOUNT' keys off the artifact only.
    if category.find('ACCOUNT') >= 0:
        return 'key' if artifact.find('AUTH') >= 0 else 'user'

    if category in _ARTIFACT_ICONS:
        # DEVICE INFO additionally matches a family of prefixed artifacts.
        if category == 'DEVICE INFO' and artifact.find('SETTINGS_SECURE_') >= 0:
            return 'settings'
        artifact_map, fallback = _ARTIFACT_ICONS[category]
        return artifact_map.get(artifact, fallback)

    return _CATEGORY_ICONS.get(category, _DEFAULT_ICON)


def generate_report(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path):
    '''Convert every parsed .temphtml artifact page into its final .html page,
    build the shared sidebar navigation, write index.html and copy static assets.

    reportfolderbase -- root folder of the report being generated
    time_in_secs/time_HMS -- total processing time (seconds / H:M:S string)
    extraction_type -- description of the extraction method
    image_input_path -- path of the source image/extraction
    '''
    control = None
    side_heading = \
        """<h6 class="sidebar-heading justify-content-between align-items-center px-3 mt-4 mb-1 text-muted">
        {0}
    </h6>
    """
    list_item = \
        """
    <li class="nav-item">
        <a class="nav-link {0}" href="{1}">
            <span data-feather="{2}"></span> {3}
        </a>
    </li>
    """
    # Populate the sidebar dynamic data (depends on data/files generated by parsers)
    # Start with the 'saved reports' (home) page link and then append elements
    nav_list_data = side_heading.format('Saved Reports') + list_item.format('', 'index.html', 'home', 'Report Home')

    # Get all files
    side_list = OrderedDict()  # { Category1 : [path1, path2, ..], Cat2:[..] } Dictionary containing paths as values, key=category
    for root, dirs, files in sorted(os.walk(reportfolderbase)):
        for file in files:
            if file.endswith(".temphtml"):
                fullpath = (os.path.join(root, file))
                head, tail = os.path.split(fullpath)
                p = pathlib.Path(fullpath)
                SectionHeader = (p.parts[-2])
                if SectionHeader == '_elements':
                    pass
                else:
                    if control == SectionHeader:
                        side_list[SectionHeader].append(fullpath)
                        icon = get_icon_name(SectionHeader, tail.replace(".temphtml", ""))
                        nav_list_data += list_item.format('', tail.replace(".temphtml", ".html"), icon, tail.replace(".temphtml", ""))
                    else:
                        control = SectionHeader
                        side_list[SectionHeader] = []
                        side_list[SectionHeader].append(fullpath)
                        nav_list_data += side_heading.format(SectionHeader)
                        icon = get_icon_name(SectionHeader, tail.replace(".temphtml", ""))
                        nav_list_data += list_item.format('', tail.replace(".temphtml", ".html"), icon, tail.replace(".temphtml", ""))

    # Now that we have all the file paths, start writing the files
    for category, path_list in side_list.items():
        for path in path_list:
            old_filename = os.path.basename(path)
            filename = old_filename.replace(".temphtml", ".html")
            # search for it in nav_list_data, then mark that one as 'active' tab
            active_nav_list_data = mark_item_active(nav_list_data, filename) + nav_bar_script
            artifact_data = get_file_content(path)

            # Now write out entire html page for artifact
            artifact_data = insert_sidebar_code(artifact_data, active_nav_list_data, path)
            with open(os.path.join(reportfolderbase, filename), 'w', encoding='utf8') as f:
                f.write(artifact_data)

            # Now delete .temphtml
            os.remove(path)
            # If dir is empty, delete it
            try:
                os.rmdir(os.path.dirname(path))
            except OSError:
                pass  # Perhaps it was not empty!

    # Create index.html's page content
    create_index_html(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path, nav_list_data)

    elements_folder = os.path.join(reportfolderbase, '_elements')
    os.mkdir(elements_folder)
    __location__ = os.path.dirname(os.path.abspath(__file__))

    def copy_no_perm(src, dst, *, follow_symlinks=True):
        # copy_function for copytree: copy file contents only (no permission
        # bits), and skip destinations that are directories.
        if not os.path.isdir(dst):
            shutil.copyfile(src, dst)
        return dst

    try:
        # Copy the flat assets first, then the bundled MDB tree.
        for asset in ("logo.jpg", "dashboard.css", "feather.min.js", "dark-mode.css",
                      "dark-mode-switch.js", "chats.css"):
            shutil.copyfile(os.path.join(__location__, asset), os.path.join(elements_folder, asset))
        shutil.copytree(os.path.join(__location__, "MDB-Free_4.13.0"),
                        os.path.join(elements_folder, 'MDB-Free_4.13.0'),
                        copy_function=copy_no_perm)
    except shutil.Error:
        print("shutil reported an error. Maybe due to recursive directory copying.")
        if os.path.exists(os.path.join(elements_folder, 'MDB-Free_4.13.0')):
            print("_elements folder seems fine. Probably nothing to worry about")


def get_file_content(path):
    '''Read and return the full contents of a UTF-8 text file.'''
    with open(path, 'r', encoding='utf8') as f:
        return f.read()


def create_index_html(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path, nav_list_data):
    '''Write out the index.html page to the report folder'''
    content = '<br />'
    content += """
    <div class="card bg-white" style="padding: 20px;">
        <h2 class="card-title">Case Information</h2>
    """  # CARD start

    case_list = [['Extraction location', image_input_path],
                 ['Extraction type', extraction_type],
                 ['Report directory', reportfolderbase],
                 ['Processing time', f'{time_HMS} (Total {time_in_secs} seconds)']]

    tab1_content = generate_key_val_table_without_headings('', case_list) + \
        """<p class="note note-primary mb-4">
        All dates and times are in UTC unless noted otherwise!
    </p>
    """

    # Get device info (this will be tab2)
    devinfo_files_path = os.path.join(reportfolderbase, 'Script Logs', 'DeviceInfo.html')
    tab2_content = get_file_content(devinfo_files_path)

    # Get script run log (this will be tab3)
    script_log_path = os.path.join(reportfolderbase, 'Script Logs', 'Screen Output.html')
    tab3_content = get_file_content(script_log_path)

    # Get processed files list (this will be tab4)
    processed_files_path = os.path.join(reportfolderbase, 'Script Logs', 'ProcessedFilesLog.html')
    tab4_content = get_file_content(processed_files_path)

    content += tabs_code.format(tab1_content, tab2_content, tab3_content, tab4_content)
    content += '</div>'  # CARD end

    authors_data = generate_authors_table_code(aleapp_contributors)
    credits_code = credits_block.format(authors_data)

    # WRITE INDEX.HTML LAST
    filename = 'index.html'
    page_title = 'iLEAPP Report'
    body_heading = 'iOS Logs Events And Protobuf Parser'
    body_description = 'iLEAPP is an open source project that aims to parse every known iOS artifact for the purpose of forensic analysis.'
    active_nav_list_data = mark_item_active(nav_list_data, filename) + nav_bar_script

    with open(os.path.join(reportfolderbase, filename), 'w', encoding='utf8') as f:
        f.write(page_header.format(page_title))
        f.write(body_start.format(f"iLEAPP {aleapp_version}"))
        f.write(body_sidebar_setup + active_nav_list_data + body_sidebar_trailer)
        f.write(body_main_header + body_main_data_title.format(body_heading, body_description))
        f.write(content)
        f.write(thank_you_note)
        f.write(credits_code)
        f.write(body_main_trailer + body_end + nav_bar_script_footer + page_footer)


def generate_authors_table_code(aleapp_contributors):
    '''Return html rows for the contributors table (blog/twitter/github links).'''
    authors_data = ''
    for author_name, blog, tweet_handle, git in aleapp_contributors:
        author_data = ''
        if blog:
            author_data += f'<a href="{blog}" target="_blank">{blog_icon}</a> &nbsp;\n'
        else:
            author_data += f'{blank_icon} &nbsp;\n'
        if tweet_handle:
            author_data += f'<a href="https://twitter.com/{tweet_handle}" target="_blank">{twitter_icon}</a> &nbsp;\n'
        else:
            author_data += f'{blank_icon} &nbsp;\n'
        if git:
            author_data += f'<a href="{git}" target="_blank">{github_icon}</a>\n'
        else:
            author_data += f'{blank_icon}'

        authors_data += individual_contributor.format(author_name, author_data)
    return authors_data


def generate_key_val_table_without_headings(title, data_list, html_escape=True, width="70%"):
    '''Returns the html code for a key-value table (2 cols) without col names'''
    code = ''
    if title:
        code += f'<h2>{title}</h2>'
    table_header_code = \
        """
        <div class="table-responsive">
            <table class="table table-bordered table-hover table-sm" width={}>
                <tbody>
        """
    table_footer_code = \
        """
                </tbody>
            </table>
        </div>
        """
    code += table_header_code.format(width)

    # Add the rows
    if html_escape:
        for row in data_list:
            code += '<tr>' + ''.join(('<td>{}</td>'.format(html.escape(str(x))) for x in row)) + '</tr>'
    else:
        for row in data_list:
            code += '<tr>' + ''.join(('<td>{}</td>'.format(str(x)) for x in row)) + '</tr>'

    # Add footer
    code += table_footer_code

    return code


def insert_sidebar_code(data, sidebar_code, filename):
    '''Replace the sidebar placeholder in `data` with the rendered sidebar html.'''
    pos = data.find(body_sidebar_dynamic_data_placeholder)
    if pos < 0:
        logfunc(f'Error, could not find {body_sidebar_dynamic_data_placeholder} in file (unknown)')
        return data
    else:
        ret = data[0: pos] + sidebar_code + data[pos + len(body_sidebar_dynamic_data_placeholder):]
        return ret


def mark_item_active(data, itemname):
    '''Finds itemname in data, then marks that node as active. Return value is changed data'''
    pos = data.find(f'" href="{itemname}"')
    if pos < 0:
        logfunc(f'Error, could not find {itemname} in {data}')
        return data
    else:
        ret = data[0: pos] + " active" + data[pos:]
        return ret
scripts/report.py
20,013
Write out the index.html page to the report folder Returns the html code for a key-value table (2 cols) without col names Returns the icon name from the feathericons collection. To add an icon type for an artifact, select one of the types from ones listed @ feathericons.com If no icon is available, the alert triangle is returned as default icon. Finds itemname in data, then marks that node as active. Return value is changed data default (if not defined!) Please keep list below SORTED by category Populate the sidebar dynamic data (depends on data/files generated by parsers) Start with the 'saved reports' (home) page link and then append elements Get all files { Category1 : [path1, path2, ..], Cat2:[..] } Dictionary containing paths as values, key=category Now that we have all the file paths, start writing the files search for it in nav_list_data, then mark that one as 'active' tab Now write out entire html page for artifact Now delete .temphtml If dir is empty, delete it Perhaps it was not empty! Create index.html's page content CARD start Get script run log (this will be tab2) Get script run log (this will be tab3) Get processed files list (this will be tab3) CARD end WRITE INDEX.HTML LAST Add the rows Add footer
1,235
en
0.794919
import unittest

from realm.cli.application import Application
from realm.cli.commands.install import InstallCommand
from realm.cli.commands.ls import LsCommand
from realm.cli.commands.task import TaskCommand
from realm.entities import Config, RealmContext
from realm.utils.child_process import ChildProcess
from tests.common import get_tests_root_dir, captured_output

REPO_DIR = get_tests_root_dir().joinpath('scenarios/multiple_packages_with_tasks')


class TestCommands(unittest.TestCase):
    """CLI command tests against the multiple_packages_with_tasks scenario."""

    @classmethod
    def setUpClass(cls) -> None:
        # The realm.json config is immutable across tests, so parse it once.
        cls.cfg = Config.from_file(realm_json_file=str(REPO_DIR.joinpath('realm.json')))

    def setUp(self) -> None:
        # A fresh context per test so discovered project state cannot leak.
        self.ctx = RealmContext(config=self.cfg,
                                projects=Application.get_projects(self.cfg))

    def _ls_output(self, **kwargs):
        """Run LsCommand with the given filters and return its stripped stdout."""
        command = LsCommand(self.ctx, **kwargs)
        with captured_output() as (out, _):
            command.run()
        return out.getvalue().strip()

    def test_scan(self):
        self.assertEqual(len(self.ctx.projects), 1)

    def test_ls(self):
        self.assertEqual(self._ls_output(), 'pkg@0.1.0')

    def test_task_install(self):
        install_cmd = InstallCommand(self.ctx)
        task_cmd = TaskCommand(self.ctx, task_name='test')
        self.assertEqual(len(task_cmd.ctx.projects), 1)

        with captured_output(stderr=False) as (out, _):
            install_cmd.run()
            task_cmd.run()

        captured = out.getvalue()
        self.assertIn('Installing the current project: pkg', captured)
        self.assertIn('Poe => python -m unittest discover -s tests -v -p "test_*.py"', captured)

    def test_git_diff(self):
        self.assertEqual(self._ls_output(since='.'), '')

    def test_git_diff_with_change(self):
        pkg_proj = next(p for p in self.ctx.projects if p.name == 'pkg')
        try:
            # Touch pyproject.toml so the project shows up as changed.
            with pkg_proj.source_dir.joinpath('pyproject.toml').open('a') as f:
                print('', file=f)
            self.assertEqual(self._ls_output(since='.'), 'pkg@0.1.0')
        finally:
            # Always restore the working tree, even if the assertion fails.
            ChildProcess.run(f'git checkout {pkg_proj.source_dir}')

    def test_scope_filter(self):
        self.assertEqual(self._ls_output(scope=['p*']), 'pkg@0.1.0')

    def test_ignore_filter(self):
        self.assertEqual(self._ls_output(ignore=['p*']), '')

    def test_match_filter(self):
        self.assertEqual(self._ls_output(match=['labels.type=package']), 'pkg@0.1.0')
tests/test_multiple_packages_with_tasks.py
3,149
Create config once Create context every test
44
en
0.31586
#! /usr/bin/env python

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Fall back to distutils when setuptools is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

from sys import version_info

# Python <= 2.5 has no stdlib json module, so pull in the simplejson backport.
install_requires = ['simplejson >= 2.0.9'] if version_info[:2] <= (2, 5) else []

setup(
    name='avro',
    version='1.7.6',
    packages=['avro'],
    package_dir={'avro': 'src/avro'},
    scripts=["./scripts/avro"],
    # Project uses simplejson, so ensure that it gets installed or upgraded
    # on the target machine
    install_requires=install_requires,
    # metadata for upload to PyPI
    author='Apache Avro',
    author_email='avro-dev@hadoop.apache.org',
    description='Avro is a serialization and RPC framework.',
    license='Apache License 2.0',
    keywords='avro serialization rpc',
    url='http://hadoop.apache.org/avro',
    extras_require={
        'snappy': ['python-snappy'],
    },
)
desktop/core/ext-py/avro-1.7.6/setup.py
1,654
! /usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Project uses simplejson, so ensure that it gets installed or upgraded on the target machine metadata for upload to PyPI
893
en
0.86476
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from .core import *
kiberdrom_core/controller/__init__.py
67
!/usr/bin/env python3 -*- coding: utf-8 -*-
43
fr
0.304089
# Template filters that render Markdown text to HTML for Django templates.
from django import template
from django.utils.safestring import mark_safe
import markdown
from markdownx.utils import markdownify
from markdownx.settings import (
    MARKDOWNX_MARKDOWN_EXTENSIONS,
    MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
from markdown.extensions import Extension

register = template.Library()


@register.filter
def markdown_to_html(text):
    """Convert Markdown text to HTML.

    The result is marked safe, so raw HTML embedded in ``text`` is NOT
    escaped — only use this on trusted input.
    """
    return mark_safe(markdownify(text))


class EscapeHtml(Extension):
    """Markdown extension that disables raw-HTML passthrough.

    Removing the block and inline HTML processors makes the converter treat
    raw HTML in the input as plain text, so it ends up escaped in the output.
    """

    def extendMarkdown(self, md):
        # Drop the processors that would otherwise emit raw HTML verbatim.
        md.preprocessors.deregister('html_block')
        md.inlinePatterns.deregister('html')


@register.filter
def markdown_to_html_with_escape(text):
    """Convert Markdown text to HTML with raw HTML escaped.

    Raw HTML, CSS, JavaScript and similar code in ``text`` are escaped
    before the Markdown is converted to HTML.  Use this filter for any
    publicly exposed content such as comment fields.
    """
    extensions = MARKDOWNX_MARKDOWN_EXTENSIONS + [EscapeHtml()]
    html = markdown.markdown(
        text,
        extensions=extensions,
        extension_configs=MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
    )
    return mark_safe(html)
blog/templatetags/markdown_html.py
1,180
マークダウンをhtmlに変換する。 マークダウンをhtmlに変換する。 生のHTMLやCSS、JavaScript等のコードをエスケープした上で、マークダウンをHTMLに変換します。 公開しているコメント欄等には、こちらを使ってください。
120
ja
1.000046
# RJMC experiment: elaborate an elemental GB typing tree with atom-specifier
# proposals and sample tree/parameter posteriors via reversible-jump MCMC.
import numpy as np

from bayes_implicit_solvent.continuous_parameter_experiments.elemental_types_mh import log_prior, mols, ll, data_path, \
    smiles

smiles_list = smiles
from bayes_implicit_solvent.typers import RADIUS_UNIT
# NOTE(review): this import shadows the `smiles_list = smiles` assignment
# above — presumably intentional, but worth confirming which list is wanted.
from bayes_implicit_solvent.freesolv import smiles_list
from bayes_implicit_solvent.typers import AtomSpecificationProposal

np.random.seed(0)

from bayes_implicit_solvent.gb_models.obc2_parameters import mbondi_model

initial_tree = mbondi_model
initial_tree.remove_node('[#14]')  # otherwise everything is -inf, because this type will be empty
initial_tree.proposal_sigmas['radius'] = 1e-2 * RADIUS_UNIT
initial_tree.proposal_sigmas['scale_factor'] = 1e-2

# add one more parameter per element appearing in FreeSolv but not specified in obc2 parameter set to initial tree
for i in [17, 35, 53]:  # Cl, Br, I
    smirks = '[#{}]'.format(i)
    initial_tree.add_child(smirks, '*')
    initial_tree.un_delete_able_types.add(smirks)

# SMIRKS atom specifiers the cross-model proposal may append to a type.
specifiers = ['X1', 'X2', 'X3', 'X4', 'a', 'A', '-1', '+0', '+1', '+2']
atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
smirks_elaboration_proposal = atom_specification_proposal

print('initial tree:')
print(initial_tree)

n_configuration_samples = 25

import os

name = 'tree_rjmc_n_config={}_{}_ll'.format(n_configuration_samples, ll)
smiles_subset_fname = os.path.join(data_path, 'smiles_subset_{}.txt'.format(name))
with open(smiles_subset_fname, 'w') as f:
    f.writelines(['{}\n'.format(s) for s in smiles_list])

from bayes_implicit_solvent.prior_checking import check_no_empty_types

# Trees whose likelihood evaluation raised, kept for post-mortem inspection.
error_y_trees = []


def log_prob(tree):
    """Unnormalized log posterior of a typing tree.

    Returns the prior alone when the prior is -inf (skips the expensive
    likelihood), and -inf when likelihood evaluation raises (the offending
    tree is recorded in ``error_y_trees``).
    """
    log_prior_value = check_no_empty_types(tree)
    theta = np.hstack([tree.get_radii(), tree.get_scale_factors()])
    log_prior_value += log_prior(theta)
    if log_prior_value > -np.inf:
        try:
            # TODO: Parallelize. Note that multiprocessing.Pool won't work
            #  here because it doesn't play nice with SwigPy objects
            # TODO: update to allow scale factors to be variable also
            log_likelihood_value = 0
            for mol in mols:
                radii = tree.assign_radii(mol.mol) / RADIUS_UNIT
                scale_factors = tree.assign_scale_factors(mol.mol)

                log_likelihood_value += mol.log_prob(radii, scale_factors)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt /
            # SystemExit and made a long sampling run impossible to interrupt.
            # `global` is unnecessary here: appending mutates the module-level
            # list without rebinding the name.
            error_y_trees.append(tree)
            print('Warning! Encountered un-anticipated exception!')
            return -np.inf
        return log_prior_value + log_likelihood_value
    else:
        return log_prior_value


from bayes_implicit_solvent.samplers import tree_rjmc
from pickle import dump

n_iterations = 10000
result = tree_rjmc(initial_tree, log_prob, smirks_elaboration_proposal,
                   n_iterations=n_iterations,
                   fraction_cross_model_proposals=0.1)
with open('elaborate_tree_rjmc2_run_n_compounds={}_n_iter={}_gaussian_ll.pkl'.format(len(mols), n_iterations),
          'wb') as f:
    dump(result, f)

with open('error_y_trees.pkl', 'wb') as f:
    dump(error_y_trees, f)
bayes_implicit_solvent/rjmc_experiments/tree_rjmc2.py
3,094
otherwise everything is -inf, because this type will be empty add one more parameter per element appearing in FreeSolv but not specified in obc2 parameter set to initial tree TODO: Parallelize. Note that multiprocessing.Pool won't work here because it doesn't play nice with SwigPy objects TODO: update to allow scale factors to be variable also
345
en
0.723222
# Proximal Policy Optimization (PPO) agent for RLlib.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

from ray.rllib.agents import Agent, with_common_config
from ray.rllib.agents.ppo.ppo_policy_graph import PPOPolicyGraph
from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer
from ray.rllib.utils.annotations import override

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # GAE(lambda) parameter
    "lambda": 1.0,
    # Initial coefficient for KL divergence
    "kl_coeff": 0.2,
    # Size of batches collected from each worker
    "sample_batch_size": 200,
    # Number of timesteps collected for each SGD round
    "train_batch_size": 4000,
    # Total SGD batch size across all devices for SGD
    "sgd_minibatch_size": 128,
    # Number of SGD iterations in each outer loop
    "num_sgd_iter": 30,
    # Stepsize of SGD
    "lr": 5e-5,
    # Learning rate schedule
    "lr_schedule": None,
    # Share layers for value function
    "vf_share_layers": False,
    # Coefficient of the value function loss
    "vf_loss_coeff": 1.0,
    # Coefficient of the entropy regularizer
    "entropy_coeff": 0.0,
    # PPO clip parameter
    "clip_param": 0.3,
    # Clip param for the value function. Note that this is sensitive to the
    # scale of the rewards. If your expected V is large, increase this.
    "vf_clip_param": 10.0,
    # If specified, clip the global norm of gradients by this amount
    "grad_clip": None,
    # Target value for KL divergence
    "kl_target": 0.01,
    # Whether to rollout "complete_episodes" or "truncate_episodes"
    "batch_mode": "truncate_episodes",
    # Which observation filter to apply to the observation
    "observation_filter": "NoFilter",
    # Uses the sync samples optimizer instead of the multi-gpu one. This does
    # not support minibatches.
    "simple_optimizer": False,
    # (Deprecated) Use the sampling behavior as of 0.6, which launches extra
    # sampling tasks for performance but can waste a large portion of samples.
    "straggler_mitigation": False,
})
# __sphinx_doc_end__
# yapf: enable


class PPOAgent(Agent):
    """Multi-GPU optimized implementation of PPO in TensorFlow."""

    _agent_name = "PPO"
    _default_config = DEFAULT_CONFIG
    _policy_graph = PPOPolicyGraph

    @override(Agent)
    def _init(self):
        """Build evaluators and pick the SGD optimizer.

        Uses the sync-samples optimizer when config["simple_optimizer"] is
        set, otherwise the multi-GPU local optimizer.
        """
        self._validate_config()
        self.local_evaluator = self.make_local_evaluator(
            self.env_creator, self._policy_graph)
        self.remote_evaluators = self.make_remote_evaluators(
            self.env_creator, self._policy_graph, self.config["num_workers"])
        if self.config["simple_optimizer"]:
            self.optimizer = SyncSamplesOptimizer(
                self.local_evaluator, self.remote_evaluators, {
                    "num_sgd_iter": self.config["num_sgd_iter"],
                    "train_batch_size": self.config["train_batch_size"],
                })
        else:
            self.optimizer = LocalMultiGPUOptimizer(
                self.local_evaluator, self.remote_evaluators, {
                    "sgd_batch_size": self.config["sgd_minibatch_size"],
                    "num_sgd_iter": self.config["num_sgd_iter"],
                    "num_gpus": self.config["num_gpus"],
                    "sample_batch_size": self.config["sample_batch_size"],
                    "num_envs_per_worker": self.config["num_envs_per_worker"],
                    "train_batch_size": self.config["train_batch_size"],
                    "standardize_fields": ["advantages"],
                    "straggler_mitigation": (
                        self.config["straggler_mitigation"]),
                })

    @override(Agent)
    def _train(self):
        """Run one optimizer step, update KL coefficients, collect metrics.

        Returns the metrics dict from the optimizer, augmented with the
        timesteps sampled this iteration and the fetches info.
        """
        if "observation_filter" not in self.raw_user_config:
            # TODO(ekl) remove this message after a few releases
            logger.info(
                "Important! Since 0.7.0, observation normalization is no "
                "longer enabled by default. To enable running-mean "
                "normalization, set 'observation_filter': 'MeanStdFilter'. "
                "You can ignore this message if your environment doesn't "
                "require observation normalization.")
        prev_steps = self.optimizer.num_steps_sampled
        fetches = self.optimizer.step()
        if "kl" in fetches:
            # single-agent
            self.local_evaluator.for_policy(
                lambda pi: pi.update_kl(fetches["kl"]))
        else:

            def update(pi, pi_id):
                # Update each trainable policy's KL coefficient from its
                # own fetches, if present.
                if pi_id in fetches:
                    pi.update_kl(fetches[pi_id]["kl"])
                else:
                    logger.debug(
                        "No data for {}, not updating kl".format(pi_id))

            # multi-agent
            self.local_evaluator.foreach_trainable_policy(update)
        res = self.optimizer.collect_metrics(
            self.config["collect_metrics_timeout"])
        res.update(
            timesteps_this_iter=self.optimizer.num_steps_sampled - prev_steps,
            info=dict(fetches, **res.get("info", {})))

        # Warn about bad clipping configs
        if self.config["vf_clip_param"] <= 0:
            rew_scale = float("inf")
        elif res["policy_reward_mean"]:
            rew_scale = 0  # punt on handling multiagent case
        else:
            rew_scale = round(
                abs(res["episode_reward_mean"]) /
                self.config["vf_clip_param"], 0)
        if rew_scale > 100:
            logger.warning(
                "The magnitude of your environment rewards are more than "
                "{}x the scale of `vf_clip_param`. ".format(rew_scale) +
                "This means that it will take more than "
                "{} iterations for your value ".format(rew_scale) +
                "function to converge. If this is not intended, consider "
                "increasing `vf_clip_param`.")
        return res

    def _validate_config(self):
        """Raise ValueError for inconsistent config; log advisory notes."""
        if self.config["sgd_minibatch_size"] > self.config["train_batch_size"]:
            raise ValueError(
                "Minibatch size {} must be <= train batch size {}.".format(
                    self.config["sgd_minibatch_size"],
                    self.config["train_batch_size"]))
        if (self.config["batch_mode"] == "truncate_episodes"
                and not self.config["use_gae"]):
            raise ValueError(
                "Episode truncation is not supported without a value "
                "function. Consider setting batch_mode=complete_episodes.")
        if (self.config["multiagent"]["policy_graphs"]
                and not self.config["simple_optimizer"]):
            logger.info(
                "In multi-agent mode, policies will be optimized sequentially "
                "by the multi-GPU optimizer. Consider setting "
                "simple_optimizer=True if this doesn't work for you.")
        if not self.config["vf_share_layers"]:
            logger.warning(
                "FYI: By default, the value function will not share layers "
                "with the policy model ('vf_share_layers': False).")
python/ray/rllib/agents/ppo/ppo.py
7,337
Multi-GPU optimized implementation of PPO in TensorFlow. yapf: disable __sphinx_doc_begin__ If true, use the Generalized Advantage Estimator (GAE) with a value function, see https://arxiv.org/pdf/1506.02438.pdf. GAE(lambda) parameter Initial coefficient for KL divergence Size of batches collected from each worker Number of timesteps collected for each SGD round Total SGD batch size across all devices for SGD Number of SGD iterations in each outer loop Stepsize of SGD Learning rate schedule Share layers for value function Coefficient of the value function loss Coefficient of the entropy regularizer PPO clip parameter Clip param for the value function. Note that this is sensitive to the scale of the rewards. If your expected V is large, increase this. If specified, clip the global norm of gradients by this amount Target value for KL divergence Whether to rollout "complete_episodes" or "truncate_episodes" Which observation filter to apply to the observation Uses the sync samples optimizer instead of the multi-gpu one. This does not support minibatches. (Deprecated) Use the sampling behavior as of 0.6, which launches extra sampling tasks for performance but can waste a large portion of samples. __sphinx_doc_end__ yapf: enable TODO(ekl) remove this message after a few releases single-agent multi-agent Warn about bad clipping configs punt on handling multiagent case
1,384
en
0.76251
import re from itertools import chain from django.core.exceptions import FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import OrderBy, Random, RawSQL, Ref from django.db.models.query_utils import QueryWrapper, select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.datastructures import EmptyResultSet from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError from django.utils.six.moves import zip class SQLCompiler(object): def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)') self.subquery = False def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.tables): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Does any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. 
""" self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Returns a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. 
# Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. for expr, _, _ in select: cols = expr.get_group_by_cols() for col in cols: expressions.append(col) for expr, (sql, params, is_ref) in order_by: if expr.contains_aggregate: continue # We can skip References to select clause, as all expressions in # the select clause are already part of the group by. if is_ref: continue expressions.extend(expr.get_source_expressions()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) if (sql, tuple(params)) not in seen: result.append((sql, params)) seen.add((sql, tuple(params))) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # The logic here is: if the main model's primary key is in the # query, then set new_expressions to that field. If that happens, # then also add having expressions to group by. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. 
if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.tables[0]): pk = expr break if pk: # MySQLism: Columns in HAVING clause must be added to the GROUP BY. expressions = [pk] + [expr for expr in expressions if expr in having] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. pks = {expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key} aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases ] return expressions def get_select(self): """ Returns three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - Which model to instantiate - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: select_list = [] for c in self.get_default_columns(): select_list.append(select_idx) select.append((c, None)) select_idx += 1 klass_info = { 'model': self.query.model, 'select_fields': select_list, } # self.query.select is a special case. 
These columns never go to # any model. for col in self.query.select: select.append((col, None)) select_idx += 1 for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info['related_klass_infos'] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info['related_klass_infos']: if ki['from_parent']: ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields']) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] for col, alias in select: ret.append((col, self.compile(col, select_format=True), alias)) return ret, klass_info, annotations def get_order_by(self): """ Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). 
""" if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by else: ordering = (self.query.order_by or self.query.get_meta().ordering or []) if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] order_by = [] for pos, field in enumerate(ordering): if hasattr(field, 'resolve_expression'): if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = True if order == 'DESC' else False if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT clause order_by.append(( OrderBy(self.query.annotations[col], descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query._extra or col not in self.query._extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. 
order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: resolved = expr.resolve_expression( self.query, allow_joins=True, reuse=None) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql).group(1) if (without_ordering, tuple(params)) in seen: continue seen.add((without_ordering, tuple(params))) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] select_sql = [t[1] for t in select] if self.query.distinct and not self.query.distinct_fields: for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). 
""" if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( name in self.query.external_aliases and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node, select_format=False): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) if select_format and not self.subquery: return node.output_field.select_format(self, sql, params) return sql, params def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False): """ Creates the SQL for this query. Returns the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ if with_limits and self.query.low_mark == self.query.high_mark: return '', () self.subquery = subquery refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() if with_limits and self.query.low_mark == self.query.high_mark: return '', () distinct_fields = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' -- see # docstring of get_from_clause() for details. 
from_, f_params = self.get_from_clause() where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) params = [] result = ['SELECT'] if self.query.distinct: result.append(self.connection.ops.distinct_sql(distinct_fields)) out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result.append(', '.join(out_cols)) result.append('FROM') result.extend(from_) params.extend(f_params) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) is not implemented.") if not order_by: order_by = self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if having: result.append('HAVING %s' % having) params.extend(h_params) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limits: if self.query.high_mark is not None: result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark)) if self.query.low_mark: if self.query.high_mark is None: val = self.connection.ops.no_limit_value() if val: result.append('LIMIT %d' % val) result.append('OFFSET %d' % self.query.low_mark) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." 
) # If we've been asked for a NOWAIT query but the backend does # not support it, raise a DatabaseError otherwise we could get # an unexpected deadlock. nowait = self.query.select_for_update_nowait if nowait and not self.connection.features.has_select_for_update_nowait: raise DatabaseError('NOWAIT is not supported on this database backend.') result.append(self.connection.ops.for_update_sql(nowait=nowait)) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def as_nested_sql(self): """ Perform the same functionality as the as_sql() method, returning an SQL string and parameters. However, the alias prefixes are bumped beforehand (in a copy -- the current query isn't changed), and any ordering is removed if the query is unsliced. Used when nesting this query inside another. """ obj = self.query.clone() if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields: # If there is no slicing in use, then we can safely drop all ordering obj.clear_ordering(True) nested_sql = obj.get_compiler(connection=self.connection).as_sql(subquery=True) if nested_sql == ('', ()): raise EmptyResultSet return nested_sql def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). 
""" result = [] if opts is None: opts = self.query.get_meta() only_load = self.deferred_to_columns() if not start_alias: start_alias = self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if field.model in only_load and field.attname not in only_load[field.model]: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Returns a quoted list of fields to use in DISTINCT ON part of the query. Note that this method can alter the tables in the query, and thus it must be called before get_from_clause(). 
""" qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name result = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(name) else: result.append("%s.%s" % (qn(alias), qn2(target.column))) return result def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None): """ Returns the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = True if order == 'DESC' else False pieces = name.split(LOOKUP_SEP) field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless the attribute name # of the field is specified. if field.is_relation and path and opts.ordering and name != field.attname: # Firstly, avoid infinite loops. if not already_seen: already_seen = set() join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets] def _setup_joins(self, pieces, opts, alias): """ A helper method for get_order_by and get_distinct. 
Note that get_ordering and get_distinct must produce same target columns on same input, as the prefixes of get_ordering and get_distinct must match. Executing SQL where this is not true is an error. """ if not alias: alias = self.query.get_initial_alias() field, targets, opts, joins, path = self.query.setup_joins( pieces, opts, alias) alias = joins[-1] return field, targets, alias, joins, path, opts def get_from_clause(self): """ Returns a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Sub-classes, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables we need. This means the select columns, ordering and distinct must be done first. """ result = [] params = [] for alias in self.query.tables: if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: result.append(', %s' % self.quote_name_unless_alias(alias)) return result, params def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None): """ Fill in the information needed for a select_related query. 
The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain(direct_choices, reverse_choices) related_klass_infos = [] if not restricted and self.query.max_depth and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() only_load = self.query.get_loaded_field_names() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: if isinstance(self.query.select_related, dict): requested = self.query.select_related restricted = True else: restricted = False def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or (cur_depth == 1 and f.name in requested): raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) _, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias) alias = joins[-1] from_parent = issubclass(model, opts.model) klass_info = { 'model': model, 'field': f, 'reverse': True, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested.keys()).difference(fields_found) if fields_not_found: 
invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def deferred_to_columns(self): """ Converts the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Returns the dictionary. """ columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, row, converters): row = list(row) for pos, (convs, expression) in converters.items(): value = row[pos] for converter in convs: value = converter(value, expression, self.connection, self.query.context) row[pos] = value return tuple(row) def results_iter(self, results=None): """ Returns an iterator over the results from executing this query. """ converters = None if results is None: results = self.execute_sql(MULTI) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) for rows in results: for row in rows: if converters: row = self.apply_converters(row, converters) yield row def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." 
""" # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI): """ Run the query against the database and returns the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ if not result_type: result_type = NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: cursor.close() raise if result_type == CURSOR: # Caller didn't specify a result_type, so just give them back the # cursor to process (and close). return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count ) if not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. 
return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name if len(columns) == 1: sql, params = self.as_sql() return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params class SQLInsertCompiler(SQLCompiler): def __init__(self, *args, **kwargs): self.return_id = False super(SQLInsertCompiler, self).__init__(*args, **kwargs) def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Checks for raw values, expressions and fields with get_placeholder() defined in that order. When field is None, the value is considered raw and is used as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. 
params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError("Aggregate functions are not allowed in this query") else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, generate placeholder SQL and parameters for each field and value, and return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. 
""" if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() result = ['INSERT INTO %s' % qn(opts.db_table)] has_fields = bool(self.query.fields) fields = self.query.fields if has_fields else [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if has_fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. 
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) if self.return_id and self.connection.features.can_return_id_from_insert: params = param_rows[0] col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) r_fmt, r_params = self.connection.ops.return_insert_id() # Skip empty r_fmt to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. if r_fmt: result.append(r_fmt % col) params += r_params return [(" ".join(result), tuple(params))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, return_id=False): assert not (return_id and len(self.query.objs) != 1) self.return_id = return_id with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not (return_id and cursor): return if self.connection.features.can_return_id_from_insert: return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id(cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Creates the SQL for this query. Returns the SQL string and list of parameters. """ assert len([t for t in self.query.tables if self.query.alias_refcount[t] > 0]) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.tables[0])] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Creates the SQL for this query. 
Returns the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () table = self.query.tables[0] qn = self.quote_name_unless_alias result = ['UPDATE %s' % qn(table)] result.append('SET') values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError("Aggregate functions are not allowed in this query") elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) if not values: return '', () result.append(', '.join(values)) where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Returns the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. 
""" cursor = super(SQLUpdateCompiler, self).execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here. Further, if we are going to be running multiple updates, we pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.clone(klass=Query) query.select_related = False query.clear_ordering(True) query._extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super(SQLUpdateCompiler, self).pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. 
self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Creates the SQL for this query. Returns the SQL string and list of parameters. """ # Empty SQL for the inner query is a marker that the inner query # isn't going to produce any results. This can happen when doing # LIMIT 0 queries (generated by qs[:0]) for example. if not self.query.subquery: raise EmptyResultSet sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation, select_format=True) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count): """ Yields blocks of rows from a cursor and ensures the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), sentinel): yield [r[0:col_count] for r in rows] finally: cursor.close()
django/db/models/sql/compiler.py
54,078
A helper method for get_order_by and get_distinct. Note that get_ordering and get_distinct must produce same target columns on same input, as the prefixes of get_ordering and get_distinct must match. Executing SQL where this is not true is an error. Perform the same functionality as the as_sql() method, returning an SQL string and parameters. However, the alias prefixes are bumped beforehand (in a copy -- the current query isn't changed), and any ordering is removed if the query is unsliced. Used when nesting this query inside another. Creates the SQL for this query. Returns the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. Creates the SQL for this query. Returns the SQL string and list of parameters. Creates the SQL for this query. Returns the SQL string and list of parameters. Creates the SQL for this query. Returns the SQL string and list of parameters. Take a sequence of N fields and a sequence of M rows of values, generate placeholder SQL and parameters for each field and value, and return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. Yields blocks of rows from a cursor and ensures the cursor is closed when done. Converts the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Returns the dictionary. Run the query against the database and returns the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. 
In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. Execute the specified update. Returns the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Checks for raw values, expressions and fields with get_placeholder() defined in that order. When field is None, the value is considered raw and is used as the placeholder, with no corresponding parameters returned. Returns the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). Returns a quoted list of fields to use in DISTINCT ON part of the query. Note that this method can alter the tables in the query, and thus it must be called before get_from_clause(). Returns a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Sub-classes, can override this to create a from-clause via a "select". 
This should only be called after any SQL construction methods that might change the tables we need. This means the select columns, ordering and distinct must be done first. Returns a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). Returns three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - Which model to instantiate - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. Does any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. 
If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here. Further, if we are going to be running multiple updates, we pull out the id values to update at this point so that they don't change as a result of the progressive updates. Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). Returns an iterator over the results from executing this query. The select, klass_info, and annotations are needed by QuerySet.iterator() these are set as a side-effect of executing the query. Note that we calculate separately a list of extra select columns needed for grammatical correctness of the query, but these columns are not included in self.select. Some examples: SomeModel.objects.annotate(Count('somecol')) GROUP BY: all fields of the model SomeModel.objects.values('name').annotate(Count('somecol')) GROUP BY: name SomeModel.objects.annotate(Count('somecol')).values('name') GROUP BY: all cols of the model SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') GROUP BY: name, pk SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') GROUP BY: name, pk In fact, the self.query.group_by is the minimal set to GROUP BY. It can't be ever restricted to a smaller set, but additional columns in HAVING, ORDER BY, and SELECT clauses are added to it. 
Unfortunately the end result is that it is impossible to force the query to have a chosen GROUP BY clause - you can almost do this by using the form: .values(*wanted_cols).annotate(AnAggregate()) but any later annotations, extra selects, values calls that refer some column outside of the wanted_cols, order_by, or even filter calls can alter the GROUP BY clause. The query.group_by is either None (no GROUP BY at all), True (group by select fields), or a list of expressions to be added to the group by. If the group by is set to a list (by .values() call most likely), then we need to add everything in it to the GROUP BY clause. Backwards compatibility hack for setting query.group_by. Remove when we have public API way of forcing the GROUP BY clause. Converts string references to expressions. Note that even if the group_by is set, it is only the minimal set to group by. So, we need to add cols in select, order_by, and having into the select in any case. We can skip References to select clause, as all expressions in the select clause are already part of the group by. If the DB can group by primary key, then group by the primary key of query's main model. Note that for PostgreSQL the GROUP BY clause must include the primary key of every table, but for MySQL it is enough to have the main table's primary key. The logic here is: if the main model's primary key is in the query, then set new_expressions to that field. If that happens, then also add having expressions to group by. Is this a reference to query's base table primary key? If the expression isn't a Col-like, then skip the expression. MySQLism: Columns in HAVING clause must be added to the GROUP BY. Filter out all expressions associated with a table's primary key present in the grouped columns. This is done by identifying all tables that have their primary key included in the grouped columns and removing non-primary key columns referring to them. self.query.select is a special case. 
These columns never go to any model. random Reference to expression in SELECT clause References to an expression which is masked out of the SELECT clause This came in through an extra(order_by=...) addition. Pass it on verbatim. 'col' is of the form 'field' or 'field1__field2' or '-field1__field2__field', etc. Don't add the same column twice, but the order direction is not taken into account so we strip it. When this entire method is refactored into expressions, then we can check each part as we generate it. This must come after 'select', 'ordering', and 'distinct' -- see docstring of get_from_clause() for details. If we've been asked for a NOWAIT query but the backend does not support it, raise a DatabaseError otherwise we could get an unexpected deadlock. Finally do cleanup - get rid of the joins we created above. If there is no slicing in use, then we can safely drop all ordering The 'seen_models' is used to optimize checking the needed parent alias for a given field. This also includes None -> start_alias to be used by local fields. A proxy model will have a different model and concrete_model. We will assign None if the field belongs to this model. Avoid loading data for already loaded parents. We end up here in the case select_related() resolution proceeds from parent model to child model. In that case the parent model data is already present in the SELECT clause, and we want to avoid reloading the same data again. If we get to this point and the field is a relation to another model, append the default ordering for that model unless the attribute name of the field is specified. Firstly, avoid infinite loops. Extra tables can end up in self.tables, but not in the alias_map if they aren't in a join. That's OK. We skip them. Only add the alias if it's not already present (the table_alias() call increments the refcount, so an alias refcount of one means this is the only reference). We've recursed far enough; bail out. 
Setup for the case when only particular related fields should be included in the related selection. If a non-related field is used like a relation, or if a single non-relational field is given. This is always executed on a query clone, so we can modify self.query Caller didn't specify a result_type, so just give them back the cursor to process (and close). done with the cursor If we are using non-chunked reads, we return the same data structure as normally, but ensure it is all read into memory before going any further. done with the cursor A field value of None means the value is raw. This is an expression, let's compile it. Some fields (e.g. geo fields) need special munging before they can be inserted. Return the common case for the placeholder The following hook is only used by Oracle Spatial, which sometimes needs to yield 'NULL' and [] as its placeholder and params instead of '%s' and [None]. The 'NULL' placeholder is produced earlier by OracleOperations.get_geom_placeholder(). The following line removes the corresponding None parameter. See ticket 10888. Don't allow values containing Col expressions. They refer to existing columns on a row, but in the case of insert the row doesn't exist yet. list of (sql, [params]) tuples for each object to be saved Shape: [n_objs][n_fields][2] tuple like ([sqls], [[params]s]) for each object to be saved Shape: [n_objs][2][n_fields] Extract separate lists for placeholders and params. Each of these has shape [n_objs][n_fields] Params for each field are still lists, and need to be flattened. We don't need quote_name_unless_alias() here, since these are all going to be column names (so we can avoid the extra overhead). An empty object. Currently the backends just accept values when generating bulk queries and generate their own placeholders. Doing that isn't necessary and it should be possible to use placeholders and expressions in bulk inserts too. 
Skip empty r_fmt to allow subclasses to customize behavior for 3rd party backends. Refs 19096. Getting the placeholder for the field. Ensure base table is in the query Now we adjust the current query: reset the where clause and get rid of all the tables we don't need (since they're in the sub-select). Either we're using the idents in multiple update queries (so don't want them to change), or the db backend doesn't support selecting from the updating table (e.g. MySQL). The fast path. Filters and updates in one query. Empty SQL for the inner query is a marker that the inner query isn't going to produce any results. This can happen when doing LIMIT 0 queries (generated by qs[:0]) for example.
14,060
en
0.885392
import numpy as np import matplotlib.pyplot as plt from matplotlib.image import imread # 데이터 준비 x = np.arange(0, 6, 0.1) # 0에서 6까지 0.1 간격으로 생성 y1 = np.sin(x) y2 = np.cos(x) # 그래프 그리기 plt.plot(x, y1, label='sin') plt.plot(x, y2, linestyle='--', label='cos') # cos 함수는 점선으로 그리기 plt.xlabel('x') # x축 이름 plt.ylabel('y') # y축 이름 plt.title('sin & cos') # 제목 plt.legend() # 이미지 표시 img = imread('background.jpg') plt.imshow(img) plt.show()
Books/DeepLearningfromScratch/P01_HelloPython/numpy_pyplot.py
527
데이터 준비 0에서 6까지 0.1 간격으로 생성 그래프 그리기 cos 함수는 점선으로 그리기 x축 이름 y축 이름 제목 이미지 표시
73
ko
1.00007
import torch import torch.nn.functional as F import torch_glow from collections import namedtuple from tests.utils import jitVsGlow # Basic test of the PyTorch conv2d Node on Glow. def test_conv2d_basic(): def conv2d_basic(inputs, filters): conv = F.conv2d(inputs, filters, padding=1) return F.relu(conv) inputs = torch.randn(1, 4, 5, 5) filters = torch.randn(8, 4, 3, 3) jitVsGlow(conv2d_basic, inputs, filters) # Test of the PyTorch conv2d Node with a provided bias tensor. def test_conv2d_with_bias(): def conv2d_with_bias(inputs, filters, bias): conv = F.conv2d(inputs, filters, bias) return F.relu(conv) inputs = torch.randn(1, 4, 5, 5) filters = torch.randn(8, 4, 3, 3) bias = torch.randn(8) jitVsGlow(conv2d_with_bias, inputs, filters, bias) # Test of the PyTorch conv2d Node sweeping through various parameters of the # Node to test that they work correctly. def test_conv2d_param_sweep(): hwOpts = [3, 4] padOpts = [0, 1] groupsOpts = [1, 2] dilationOpts = [1, 2] strideOpts = [1, 2] Setting = namedtuple('Setting', ['h', 'w', 'p', 'g', 'd', 's',]) settings = [Setting(h=h, w=w, p=p, g=g, d=d, s=s) for h in hwOpts for w in hwOpts for p in padOpts for g in groupsOpts for d in dilationOpts for s in strideOpts] for setting in settings: def conv2d_param_sweep(inputs, filters): conv = F.conv2d(inputs, filters, padding=setting.p, groups=setting.g) return F.relu(conv) inputs = torch.randn(2, 4, setting.h, setting.w) filters = torch.randn(8, 4/setting.g, 3, 3) jitVsGlow(conv2d_param_sweep, inputs, filters)
torch_glow/tests/nodes/conv2d_test.py
1,645
Basic test of the PyTorch conv2d Node on Glow. Test of the PyTorch conv2d Node with a provided bias tensor. Test of the PyTorch conv2d Node sweeping through various parameters of the Node to test that they work correctly.
221
en
0.830215
import argparse import sys import pathlib import random from unittest import mock import pytest from _repobee import plugin import repobee_plug as plug from repobee_feedback import feedback ASSIGNMENT_NAMES = ("task-1", "task-2") STUDENT_TEAMS = tuple( [ plug.StudentTeam(members=members) for members in (["slarse"], ["glassey"], ["grundb", "glennol"]) ] ) STUDENT_TEAM_NAMES = tuple(map(str, STUDENT_TEAMS)) PASS_ISSUE = plug.Issue(title="Pass", body="Well done!\nAbsolutely flawless!") KOMP_ISSUE = plug.Issue( title="Komplettering", body="Not perfect, you need to fix this." ) FAIL_ISSUE = plug.Issue( title="Fail", body="Unfortunately, there are severe errors." ) ISSUES = (PASS_ISSUE, KOMP_ISSUE, FAIL_ISSUE) random.seed(512) def _write_issue(issue: plug.Issue, path: pathlib.Path): text = "{}\n{}".format(issue.title, issue.body) path.write_text(text, encoding=sys.getdefaultencoding()) def _write_multi_issues_file(repos_and_issues, path): with open(str(path), mode="w", encoding=sys.getdefaultencoding()) as file: cur = 0 for repo_name, issue in repos_and_issues: if cur: file.write("\n") file.write("#ISSUE#{}#{}\n".format(repo_name, issue.title)) file.write(issue.body) cur += 1 def test_register(): """Just test that there is no crash""" plugin.register_plugins([feedback]) @pytest.fixture def parsed_args_issues_dir(tmp_path): return argparse.Namespace( students=list(STUDENT_TEAMS), assignments=list(ASSIGNMENT_NAMES), batch_mode=True, issues_dir=str(tmp_path), multi_issues_file=None, truncation_length=50, allow_missing=False, ) @pytest.fixture def parsed_args_multi_issues_file(with_multi_issues_file): issues_file, _ = with_multi_issues_file return argparse.Namespace( students=list(STUDENT_TEAMS), assignments=list(ASSIGNMENT_NAMES), batch_mode=True, issues_dir=None, multi_issues_file=str(issues_file), truncation_length=50, allow_missing=False, ) @pytest.fixture def api_mock(): return mock.MagicMock(spec=plug.PlatformAPI) @pytest.fixture def with_issues(tmp_path): """Create 
issue files in a temporary directory and return a list of (team, issue) tuples. """ repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES) existing_issues = [] for repo_name in repo_names: issue_file = tmp_path / "{}.md".format(repo_name) issue = random.choice(ISSUES) _write_issue(issue, issue_file) existing_issues.append((repo_name, issue)) return existing_issues @pytest.fixture def with_multi_issues_file(tmp_path): """Create the multi issues file.""" repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES) repos_and_issues = [ (repo_name, random.choice(ISSUES)) for repo_name in repo_names ] issues_file = tmp_path / "issues.md" _write_multi_issues_file(repos_and_issues, issues_file) return issues_file, repos_and_issues class TestCallback: """Tests for the primary callback.""" def test_opens_issues_from_issues_dir( self, with_issues, parsed_args_issues_dir, api_mock ): """Test that the callback calls the API.open_issue for the expected repos and issues, when the issues all exist and are well formed. """ expected_calls = [ mock.call(issue.title, issue.body, mock.ANY) for repo_name, issue in with_issues ] feedback.callback(args=parsed_args_issues_dir, api=api_mock) api_mock.create_issue.assert_has_calls(expected_calls, any_order=True) def test_aborts_if_issue_is_missing( self, with_issues, parsed_args_issues_dir, api_mock, tmp_path ): """Test that the callback exits with a plug.PlugError if any of the expected issues is not found. 
""" repo_without_issue = plug.generate_repo_name( STUDENT_TEAM_NAMES[-1], ASSIGNMENT_NAMES[0] ) missing_file = tmp_path / "{}.md".format(repo_without_issue) missing_file.unlink() with pytest.raises(plug.PlugError) as exc_info: feedback.callback(args=parsed_args_issues_dir, api=api_mock) assert repo_without_issue in str(exc_info.value) assert not api_mock.create_issue.called def test_ignores_missing_issue_if_allow_missing( self, with_issues, parsed_args_issues_dir, api_mock, tmp_path ): """Test that missing issues are ignored if --allow-mising is set.""" repo_without_issue = plug.generate_repo_name( STUDENT_TEAM_NAMES[-1], ASSIGNMENT_NAMES[0] ) (tmp_path / "{}.md".format(repo_without_issue)).unlink() expected_calls = [ mock.call(issue.title, issue.body, mock.ANY) for repo_name, issue in with_issues if repo_name != repo_without_issue ] args_dict = vars(parsed_args_issues_dir) args_dict["allow_missing"] = True args = argparse.Namespace(**args_dict) feedback.callback(args=args, api=api_mock) api_mock.create_issue.assert_has_calls(expected_calls, any_order=True) def test_opens_nothing_if_open_prompt_returns_false( self, with_issues, parsed_args_issues_dir, api_mock ): """Test that the callback does not attempt to open any issues if the 'may I open' prompt returns false. """ args_dict = vars(parsed_args_issues_dir) args_dict["batch_mode"] = False parsed_args_interactive = argparse.Namespace(**args_dict) with mock.patch("builtins.input", return_value="n", autospec=True): feedback.callback(args=parsed_args_interactive, api=api_mock) assert not api_mock.create_issue.called def test_opens_issues_from_multi_issues_file( self, with_multi_issues_file, api_mock, parsed_args_multi_issues_file ): """Test that the callback opens issues correctly when they are all contained in a multi issues file. 
""" issues_file, repos_and_issues = with_multi_issues_file expected_calls = [ mock.call(issue.title, issue.body, mock.ANY) for repo_name, issue in repos_and_issues ] feedback.callback(args=parsed_args_multi_issues_file, api=api_mock) api_mock.create_issue.assert_has_calls(expected_calls) def test_skips_unexpected_issues_in_multi_issues_file( self, with_multi_issues_file, parsed_args_multi_issues_file, api_mock ): """Test that an exception is raised if one or more issues are found relating to student repos that ar not in prod(assignments, students). """ student_teams = parsed_args_multi_issues_file.students args_dict = vars(parsed_args_multi_issues_file) args_dict["students"] = student_teams[:-1] args = argparse.Namespace(**args_dict) unexpected_repos = plug.generate_repo_names( student_teams[-1:], ASSIGNMENT_NAMES ) _, repos_and_issues = with_multi_issues_file expected_calls = [ mock.call(issue.title, issue.body, mock.ANY) for repo_name, issue in repos_and_issues if repo_name not in unexpected_repos ] feedback.callback(args=args, api=api_mock) api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
tests/test_feedback.py
7,582
Tests for the primary callback. Test that the callback exits with a plug.PlugError if any of the expected issues is not found. Test that missing issues are ignored if --allow-mising is set. Test that the callback calls the API.open_issue for the expected repos and issues, when the issues all exist and are well formed. Test that the callback opens issues correctly when they are all contained in a multi issues file. Test that the callback does not attempt to open any issues if the 'may I open' prompt returns false. Just test that there is no crash Test that an exception is raised if one or more issues are found relating to student repos that ar not in prod(assignments, students). Create issue files in a temporary directory and return a list of (team, issue) tuples. Create the multi issues file.
803
en
0.900295
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """A recursive minimal eigen optimizer in Qiskit's optimization module.""" from copy import deepcopy from enum import Enum from typing import Optional, Union, List, Tuple, Dict import logging import numpy as np from qiskit.aqua.algorithms import NumPyMinimumEigensolver from qiskit.aqua.utils.validation import validate_min from .optimization_algorithm import OptimizationAlgorithm, OptimizationResult from .minimum_eigen_optimizer import MinimumEigenOptimizer, MinimumEigenOptimizationResult from ..converters.quadratic_program_to_qubo import QuadraticProgramToQubo from ..exceptions import QiskitOptimizationError from ..problems import Variable from ..problems.quadratic_program import QuadraticProgram logger = logging.getLogger(__name__) class IntermediateResult(Enum): """ Defines whether the intermediate results of :class:`~qiskit.optimization.algorithms.RecursiveMinimumEigenOptimizer` at each iteration should be stored and returned to the end user. 
""" NO_ITERATIONS = 0 """No intermediate results are stored.""" LAST_ITERATION = 1 """Only results from the last iteration are stored.""" ALL_ITERATIONS = 2 """All intermediate results are stored.""" class RecursiveMinimumEigenOptimizationResult(OptimizationResult): """Recursive Eigen Optimizer Result.""" def __init__(self, x: Union[List[float], np.ndarray], fval: float, variables: List[Variable], replacements: Dict[str, Tuple[str, int]], history: Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]) -> None: """ Constructs an instance of the result class. Args: x: the optimal value found in the optimization. fval: the optimal function value. variables: the list of variables of the optimization problem. replacements: a dictionary of substituted variables. Key is a variable being substituted, value is a tuple of substituting variable and a weight, either 1 or -1. history: a tuple containing intermediate results. The first element is a list of :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step via `min_num_vars_optimizer`. """ super().__init__(x, fval, variables, None) self._replacements = replacements self._history = history @property def replacements(self) -> Dict[str, Tuple[str, int]]: """ Returns a dictionary of substituted variables. Key is a variable being substituted, value is a tuple of substituting variable and a weight, either 1 or -1.""" return self._replacements @property def history(self) -> Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]: """ Returns intermediate results. 
The first element is a list of :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step via `min_num_vars_optimizer`. """ return self._history class RecursiveMinimumEigenOptimizer(OptimizationAlgorithm): """A meta-algorithm that applies a recursive optimization. The recursive minimum eigen optimizer applies a recursive optimization on top of :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer`. The algorithm is introduced in [1]. Examples: Outline of how to use this class: .. code-block:: from qiskit.aqua.algorithms import QAOA from qiskit.optimization.problems import QuadraticProgram from qiskit.optimization.algorithms import RecursiveMinimumEigenOptimizer problem = QuadraticProgram() # specify problem here # specify minimum eigen solver to be used, e.g., QAOA qaoa = QAOA(...) optimizer = RecursiveMinimumEigenOptimizer(qaoa) result = optimizer.solve(problem) References: [1]: Bravyi et al. (2019), Obstacles to State Preparation and Variational Optimization from Symmetry Protection. http://arxiv.org/abs/1910.08980. """ def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int = 1, min_num_vars_optimizer: Optional[OptimizationAlgorithm] = None, penalty: Optional[float] = None, history: Optional[IntermediateResult] = IntermediateResult.LAST_ITERATION) -> None: """ Initializes the recursive minimum eigen optimizer. This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when to to apply the iterative scheme, and the optimizer to be applied once the threshold number of variables is reached. Args: min_eigen_optimizer: The eigen optimizer to use in every iteration. min_num_vars: The minimum number of variables to apply the recursive scheme. 
If this threshold is reached, the min_num_vars_optimizer is used. min_num_vars_optimizer: This optimizer is used after the recursive scheme for the problem with the remaining variables. penalty: The factor that is used to scale the penalty terms corresponding to linear equality constraints. history: Whether the intermediate results are stored. Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`. Raises: QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1). """ validate_min('min_num_vars', min_num_vars, 1) self._min_eigen_optimizer = min_eigen_optimizer self._min_num_vars = min_num_vars if min_num_vars_optimizer: self._min_num_vars_optimizer = min_num_vars_optimizer else: self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver()) self._penalty = penalty self._history = history self._qubo_converter = QuadraticProgramToQubo() def get_compatibility_msg(self, problem: QuadraticProgram) -> str: """Checks whether a given problem can be solved with this optimizer. Checks whether the given problem is compatible, i.e., whether the problem can be converted to a QUBO, and otherwise, returns a message explaining the incompatibility. Args: problem: The optimization problem to check compatibility. Returns: A message describing the incompatibility. """ return QuadraticProgramToQubo.get_compatibility_msg(problem) def solve(self, problem: QuadraticProgram) -> OptimizationResult: """Tries to solve the given problem using the recursive optimizer. Runs the optimizer to try to solve the optimization problem. Args: problem: The problem to be solved. Returns: The result of the optimizer applied to the problem. Raises: QiskitOptimizationError: Incompatible problem. 
QiskitOptimizationError: Infeasible due to variable substitution """ self._verify_compatibility(problem) # convert problem to QUBO, this implicitly checks if the problem is compatible problem_ = self._qubo_converter.convert(problem) problem_ref = deepcopy(problem_) # run recursive optimization until the resulting problem is small enough replacements = {} # type: Dict[str, Tuple[str, int]] min_eigen_results = [] # type: List[MinimumEigenOptimizationResult] while problem_.get_num_vars() > self._min_num_vars: # solve current problem with optimizer res = self._min_eigen_optimizer.solve(problem_) # type: MinimumEigenOptimizationResult if self._history == IntermediateResult.ALL_ITERATIONS: min_eigen_results.append(res) # analyze results to get strongest correlation correlations = res.get_correlations() i, j = self._find_strongest_correlation(correlations) x_i = problem_.variables[i].name x_j = problem_.variables[j].name if correlations[i, j] > 0: # set x_i = x_j problem_ = problem_.substitute_variables(variables={i: (j, 1)}) if problem_.status == QuadraticProgram.Status.INFEASIBLE: raise QiskitOptimizationError('Infeasible due to variable substitution') replacements[x_i] = (x_j, 1) else: # set x_i = 1 - x_j, this is done in two steps: # 1. set x_i = 1 + x_i # 2. set x_i = -x_j # 1a. get additional offset constant = problem_.objective.constant constant += problem_.objective.linear[i] constant += problem_.objective.quadratic[i, i] problem_.objective.constant = constant # 1b. get additional linear part for k in range(problem_.get_num_vars()): coeff = problem_.objective.linear[k] if k == i: coeff += 2*problem_.objective.quadratic[i, k] else: coeff += problem_.objective.quadratic[i, k] # set new coefficient if not too small if np.abs(coeff) > 1e-10: problem_.objective.linear[k] = coeff else: problem_.objective.linear[k] = 0 # 2. 
replace x_i by -x_j problem_ = problem_.substitute_variables(variables={i: (j, -1)}) if problem_.status == QuadraticProgram.Status.INFEASIBLE: raise QiskitOptimizationError('Infeasible due to variable substitution') replacements[x_i] = (x_j, -1) # solve remaining problem result = self._min_num_vars_optimizer.solve(problem_) # unroll replacements var_values = {} for i, x in enumerate(problem_.variables): var_values[x.name] = result.x[i] def find_value(x, replacements, var_values): if x in var_values: # if value for variable is known, return it return var_values[x] elif x in replacements: # get replacement for variable (y, sgn) = replacements[x] # find details for replacing variable value = find_value(y, replacements, var_values) # construct, set, and return new value var_values[x] = value if sgn == 1 else 1 - value return var_values[x] else: raise QiskitOptimizationError('Invalid values!') # loop over all variables to set their values for x_i in problem_ref.variables: if x_i.name not in var_values: find_value(x_i.name, replacements, var_values) # build history before any translations are applied # min_eigen_results is an empty list if history is set to NO or LAST. 
history = (min_eigen_results, None if self._history == IntermediateResult.NO_ITERATIONS else result) # construct result x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables] fval = result.fval result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables) result = self._qubo_converter.interpret(result) return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval, variables=result.variables, replacements=replacements, history=history) def _find_strongest_correlation(self, correlations): # get absolute values and set diagonal to -1 to make sure maximum is always on off-diagonal abs_correlations = np.abs(correlations) for i in range(len(correlations)): abs_correlations[i, i] = -1 # get index of maximum (by construction on off-diagonal) m_max = np.argmax(abs_correlations.flatten()) # translate back to indices i = int(m_max // len(correlations)) j = int(m_max - i*len(correlations)) return (i, j)
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
13,424
Defines whether the intermediate results of :class:`~qiskit.optimization.algorithms.RecursiveMinimumEigenOptimizer` at each iteration should be stored and returned to the end user. Recursive Eigen Optimizer Result. A meta-algorithm that applies a recursive optimization. The recursive minimum eigen optimizer applies a recursive optimization on top of :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer`. The algorithm is introduced in [1]. Examples: Outline of how to use this class: .. code-block:: from qiskit.aqua.algorithms import QAOA from qiskit.optimization.problems import QuadraticProgram from qiskit.optimization.algorithms import RecursiveMinimumEigenOptimizer problem = QuadraticProgram() # specify problem here # specify minimum eigen solver to be used, e.g., QAOA qaoa = QAOA(...) optimizer = RecursiveMinimumEigenOptimizer(qaoa) result = optimizer.solve(problem) References: [1]: Bravyi et al. (2019), Obstacles to State Preparation and Variational Optimization from Symmetry Protection. http://arxiv.org/abs/1910.08980. Constructs an instance of the result class. Args: x: the optimal value found in the optimization. fval: the optimal function value. variables: the list of variables of the optimization problem. replacements: a dictionary of substituted variables. Key is a variable being substituted, value is a tuple of substituting variable and a weight, either 1 or -1. history: a tuple containing intermediate results. The first element is a list of :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step via `min_num_vars_optimizer`. Initializes the recursive minimum eigen optimizer. 
This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when to to apply the iterative scheme, and the optimizer to be applied once the threshold number of variables is reached. Args: min_eigen_optimizer: The eigen optimizer to use in every iteration. min_num_vars: The minimum number of variables to apply the recursive scheme. If this threshold is reached, the min_num_vars_optimizer is used. min_num_vars_optimizer: This optimizer is used after the recursive scheme for the problem with the remaining variables. penalty: The factor that is used to scale the penalty terms corresponding to linear equality constraints. history: Whether the intermediate results are stored. Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`. Raises: QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1). Checks whether a given problem can be solved with this optimizer. Checks whether the given problem is compatible, i.e., whether the problem can be converted to a QUBO, and otherwise, returns a message explaining the incompatibility. Args: problem: The optimization problem to check compatibility. Returns: A message describing the incompatibility. Returns intermediate results. The first element is a list of :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step via `min_num_vars_optimizer`. Returns a dictionary of substituted variables. Key is a variable being substituted, value is a tuple of substituting variable and a weight, either 1 or -1. Tries to solve the given problem using the recursive optimizer. Runs the optimizer to try to solve the optimization problem. Args: problem: The problem to be solved. Returns: The result of the optimizer applied to the problem. 
Raises: QiskitOptimizationError: Incompatible problem. QiskitOptimizationError: Infeasible due to variable substitution A recursive minimal eigen optimizer in Qiskit's optimization module. -*- coding: utf-8 -*- This code is part of Qiskit. (C) Copyright IBM 2020. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals. convert problem to QUBO, this implicitly checks if the problem is compatible run recursive optimization until the resulting problem is small enough type: Dict[str, Tuple[str, int]] type: List[MinimumEigenOptimizationResult] solve current problem with optimizer type: MinimumEigenOptimizationResult analyze results to get strongest correlation set x_i = x_j set x_i = 1 - x_j, this is done in two steps: 1. set x_i = 1 + x_i 2. set x_i = -x_j 1a. get additional offset 1b. get additional linear part set new coefficient if not too small 2. replace x_i by -x_j solve remaining problem unroll replacements if value for variable is known, return it get replacement for variable find details for replacing variable construct, set, and return new value loop over all variables to set their values build history before any translations are applied min_eigen_results is an empty list if history is set to NO or LAST. construct result get absolute values and set diagonal to -1 to make sure maximum is always on off-diagonal get index of maximum (by construction on off-diagonal) translate back to indices
5,794
en
0.735696
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Base class for a stock configuration. @author: rajsaswa """ class StockConfig: def __init__(self): pass def get_stock_url(self): pass
stock_notifier/stock_config/stock_config.py
220
Base class for a stock configuration. @author: rajsaswa !/usr/bin/env python2 -*- coding: utf-8 -*-
101
en
0.495677
import warnings import pandas as pd from ..config import Config from ..backend import Backend from ..backend import PYOORB from ..backend import FINDORB from ..backend import MJOLNIR __all__ = [ "generateEphemeris" ] def generateEphemeris( orbits, observers, backend="MJOLNIR", backend_kwargs={}, test_orbit=None, threads=Config.NUM_THREADS, chunk_size=1 ): """ Generate ephemeris for the orbits and the given observatories. Parameters ---------- orbits : `~numpy.ndarray` (N, 6) Orbits for which to generate ephemeris. If backend is 'THOR', then these orbits must be expressed as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be expressed in keplerian, cometary or cartesian elements. observers : dict A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values. Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state. The expected data frame columns are obs_x, obs_y, obs_y and optionally the velocity columns obs_vx, obs_vy, obs_vz. If no velocities are not correctly given, then sky-plane velocities will all be zero. (See: `~thor.observatories.getObserverState`) backend : {'MJOLNIR', 'PYOORB'}, optional Which backend to use. backend_kwargs : dict, optional Settings and additional parameters to pass to selected backend. Returns ------- ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18) A DataFrame containing the generated ephemeris. 
""" if backend == "MJOLNIR": backend = MJOLNIR(**backend_kwargs) elif backend == "PYOORB": backend = PYOORB(**backend_kwargs) elif backend == "FINDORB": backend = FINDORB(**backend_kwargs) elif isinstance(backend, Backend): backend = backend if len(backend_kwargs) > 0: warnings.warn("backend_kwargs will be ignored since a instantiated backend class has been given.") else: err = ( "backend should be one of 'MJOLNIR', 'PYOORB', 'FINDORB' or an instantiated Backend class" ) raise ValueError(err) ephemeris = backend.generateEphemeris( orbits, observers, test_orbit=test_orbit, threads=threads, chunk_size=chunk_size ) ephemeris.sort_values( by=["orbit_id", "observatory_code", "mjd_utc"], inplace=True ) ephemeris.reset_index( inplace=True, drop=True ) return ephemeris
thor/orbits/ephemeris.py
2,706
Generate ephemeris for the orbits and the given observatories. Parameters ---------- orbits : `~numpy.ndarray` (N, 6) Orbits for which to generate ephemeris. If backend is 'THOR', then these orbits must be expressed as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be expressed in keplerian, cometary or cartesian elements. observers : dict A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values. Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state. The expected data frame columns are obs_x, obs_y, obs_y and optionally the velocity columns obs_vx, obs_vy, obs_vz. If no velocities are not correctly given, then sky-plane velocities will all be zero. (See: `~thor.observatories.getObserverState`) backend : {'MJOLNIR', 'PYOORB'}, optional Which backend to use. backend_kwargs : dict, optional Settings and additional parameters to pass to selected backend. Returns ------- ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18) A DataFrame containing the generated ephemeris.
1,177
en
0.6157
#! -*- coding:utf-8 -*- # 评估脚本 # 数据集:IFLYTEK' 长文本分类 (https://github.com/CLUEbenchmark/CLUE) import json import numpy as np from bert4keras.backend import keras, set_gelu from bert4keras.tokenizers import Tokenizer from bert4keras.models import build_transformer_model from bert4keras.optimizers import Adam from bert4keras.snippets import sequence_padding, DataGenerator from bert4keras.snippets import open from keras.layers import * set_gelu('tanh') # 切换tanh版本 num_classes = 119 maxlen = 128 batch_size = 32 # RoBERTa small config_path = '/root/kg/bert/chinese_roberta_L-6_H-384_A-12/bert_config.json' checkpoint_path = '/root/kg/bert/chinese_roberta_L-6_H-384_A-12/bert_model.ckpt' dict_path = '/root/kg/bert/chinese_roberta_L-6_H-384_A-12/vocab.txt' model_type = 'bert' """ # albert small config_path = '/root/kg/bert/albert_small_zh_google/albert_config.json' checkpoint_path = '/root/kg/bert/albert_small_zh_google/albert_model.ckpt' dict_path = '/root/kg/bert/albert_small_zh_google/vocab.txt' model_type = 'albert' # RoBERTa tiny config_path = '/root/kg/bert/chinese_roberta_L-4_H-312_A-12/bert_config.json' checkpoint_path = '/root/kg/bert/chinese_roberta_L-4_H-312_A-12/bert_model.ckpt' dict_path = '/root/kg/bert/chinese_roberta_L-4_H-312_A-12/vocab.txt' model_type = 'bert' # albert tiny config_path = '/root/kg/bert/albert_tiny_zh_google/albert_config.json' checkpoint_path = '/root/kg/bert/albert_tiny_zh_google/albert_model.ckpt' dict_path = '/root/kg/bert/albert_tiny_zh_google/vocab.txt' model_type = 'albert' """ def load_data(filename): D = [] with open(filename) as f: for i, l in enumerate(f): l = json.loads(l) text, label = l['sentence'], l['label'] D.append((text, int(label))) return D # 加载数据集 train_data = load_data('/root/CLUE-master/baselines/CLUEdataset/iflytek/train.json') valid_data = load_data('/root/CLUE-master/baselines/CLUEdataset/iflytek/dev.json') # 建立分词器 tokenizer = Tokenizer(dict_path, do_lower_case=True) class data_generator(DataGenerator): """数据生成器 """ 
def __iter__(self, random=False): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for is_end, (text, label) in self.sample(random): token_ids, segment_ids = tokenizer.encode(text, max_length=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) if len(batch_token_ids) == self.batch_size or is_end: batch_token_ids = sequence_padding(batch_token_ids) batch_segment_ids = sequence_padding(batch_segment_ids) batch_labels = sequence_padding(batch_labels) yield [batch_token_ids, batch_segment_ids], batch_labels batch_token_ids, batch_segment_ids, batch_labels = [], [], [] # 加载预训练模型 bert = build_transformer_model( config_path=config_path, checkpoint_path=checkpoint_path, model=model_type, return_keras_model=False, ) output = Lambda(lambda x: x[:, 0])(bert.model.output) output = Dense(units=num_classes, activation='softmax', kernel_initializer=bert.initializer)(output) model = keras.models.Model(bert.model.input, output) model.summary() model.compile( loss='sparse_categorical_crossentropy', optimizer=Adam(5e-5), metrics=['accuracy'], ) # 转换数据集 train_generator = data_generator(train_data, batch_size) valid_generator = data_generator(valid_data, batch_size) def evaluate(data): total, right = 0., 0. for x_true, y_true in data: y_pred = model.predict(x_true).argmax(axis=1) y_true = y_true[:, 0] total += len(y_true) right += (y_true == y_pred).sum() return right / total class Evaluator(keras.callbacks.Callback): def __init__(self): self.best_val_acc = 0. def on_epoch_end(self, epoch, logs=None): val_acc = evaluate(valid_generator) if val_acc > self.best_val_acc: self.best_val_acc = val_acc model.save_weights('best_model.weights') print(u'val_acc: %.5f, best_val_acc: %.5f\n' % (val_acc, self.best_val_acc)) evaluator = Evaluator() model.fit_generator(train_generator.forfit(), steps_per_epoch=len(train_generator), epochs=50, callbacks=[evaluator])
examples/task_iflytek.py
4,497
数据生成器 ! -*- coding:utf-8 -*- 评估脚本 数据集:IFLYTEK' 长文本分类 (https://github.com/CLUEbenchmark/CLUE) 切换tanh版本 RoBERTa small 加载数据集 建立分词器 加载预训练模型 转换数据集
147
zh
0.932504
# Athena Health Preliminary Test - II #!/bin/python3 import math import os import random import re import sys # # Complete the 'moves' function below. # # The function is expected to return an INTEGER. # The function accepts INTEGER_ARRAY arr as parameter. # def moves(arr): ee=e=0 for i in range(0,len(arr)): if(arr[i]%2==0): e+=1 for i in range(0,len(arr)): if(arr[i]%2==0): if(i>=e): ee+=1 return ee if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') arr_count = int(input().strip()) arr = [] for _ in range(arr_count): arr_item = int(input().strip()) arr.append(arr_item) result = moves(arr) fptr.write(str(result) + '\n') fptr.close()
0_Companies/ATHENA2.py
759
Athena Health Preliminary Test - II!/bin/python3 Complete the 'moves' function below. The function is expected to return an INTEGER. The function accepts INTEGER_ARRAY arr as parameter.
185
en
0.638918
import os import requests from os.path import join, isfile from nerblackbox.modules.datasets.formatter.base_formatter import BaseFormatter class CoNLL2003Formatter(BaseFormatter): def __init__(self): ner_dataset = "conll2003" ner_tag_list = ["PER", "ORG", "LOC", "MISC"] super().__init__(ner_dataset, ner_tag_list) #################################################################################################################### # ABSTRACT BASE METHODS #################################################################################################################### def get_data(self, verbose: bool): """ I: get data ----------- :param verbose: [bool] :return: - """ url_base = "https://raw.githubusercontent.com/patverga/torch-ner-nlp-from-scratch/master/data/conll2003/" targets = ["eng.train", "eng.testa", "eng.testb"] for target in targets: target_file = join(self.dataset_path, target) # fetch tgz from url if isfile(target_file): if verbose: print(f".. file at {target_file} already exists") else: url = url_base + target myfile = requests.get(url, allow_redirects=True) open(target_file, "wb").write(myfile.content) if verbose: print(f".. 
file fetched from {url} and saved at {target_file}") def create_ner_tag_mapping(self): """ II: customize ner_training tag mapping if wanted ------------------------------------- :return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data """ return dict() def format_data(self): """ III: format data ---------------- :return: - """ for phase in ["train", "val", "test"]: rows = self._read_original_file(phase) self._write_formatted_csv(phase, rows) def resplit_data(self, val_fraction: float): """ IV: resplit data ---------------- :param val_fraction: [float] :return: - """ # train -> train df_train = self._read_formatted_csvs(["train"]) self._write_final_csv("train", df_train) # val -> val df_val = self._read_formatted_csvs(["val"]) self._write_final_csv("val", df_val) # test -> test df_test = self._read_formatted_csvs(["test"]) self._write_final_csv("test", df_test) #################################################################################################################### # HELPER: READ ORIGINAL #################################################################################################################### def _read_original_file(self, phase): """ III: format data --------------------------------------------- :param phase: [str] 'train' or 'test' :return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', '0'], ..] """ file_name = { "train": "eng.train", "val": "eng.testa", "test": "eng.testb", } file_path_original = join(self.dataset_path, file_name[phase]) _rows = list() if os.path.isfile(file_path_original): with open(file_path_original) as f: for i, row in enumerate(f.readlines()): _rows.append(row.strip().split()) print(f"\n> read {file_path_original}") _rows = [ [row[0], row[-1]] if (len(row) == 4 and row[0] != "-DOCSTART-") else list() for row in _rows ] return _rows
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
3,832
III: format data --------------------------------------------- :param phase: [str] 'train' or 'test' :return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', '0'], ..] II: customize ner_training tag mapping if wanted ------------------------------------- :return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data III: format data ---------------- :return: - I: get data ----------- :param verbose: [bool] :return: - IV: resplit data ---------------- :param val_fraction: [float] :return: - ABSTRACT BASE METHODS fetch tgz from url train -> train val -> val test -> test HELPER: READ ORIGINAL
661
en
0.268324
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os from spack.package import * class Visit(CMakePackage): """VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool. See comments in VisIt's package.py for tips about building VisIt with spack. Building VisIt with Spack is still experimental and many standard features are likely disabled LINUX------------------------------------------------------------------- spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 LINUX-W/O-OPENGL-------------------------------------------------------- spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \\ ^mesa+opengl MACOS------------------------------------------------------------------- spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \\ ^qt~framework """ ############################ # Suggestions for building: ############################ # cyrush note: # # Out of the box, VisIt's python 2 requirement will cause # spack spec constraint errors due Qt + Mesa build # dependencies. # # You can avoid this using: # # linux: # spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 # # linux w/o opengl: (add mesa as opengl if system lacks system opengl ) # # spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \ # ^mesa+opengl # # macOS: # spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \ # ^qt~framework # # Rpath issues undermine qwt (not qt) when a build as a framework # VisIt's osxfixup resolves this for us in other cases, # but we can't use osxfixup with spack b/c it will undermine other libs. # # Even with these changes, VisIt's Python CLI does not work on macOS, # there is a linking issue related to OpenSSL. 
# (dyld: Symbol not found: _GENERAL_NAME_free - which comes from OpenSSL) # ############################ homepage = "https://wci.llnl.gov/simulation/computer-codes/visit/" git = "https://github.com/visit-dav/visit.git" url = "https://github.com/visit-dav/visit/releases/download/v3.2.1/visit3.2.1.tar.gz" tags = ['radiuss'] maintainers = ['cyrush'] extendable = True executables = ['^visit$'] version('develop', branch='develop') version('3.2.2', sha256='d19ac24c622a3bc0a71bc9cd6e5c9860e43f39e3279672129278b6ebce8d0ead') version('3.2.1', sha256='779d59564c63f31fcbfeff24b14ddd6ac941b3bb7d671d31765a770d193f02e8') version('3.1.1', sha256='0b60ac52fd00aff3cf212a310e36e32e13ae3ca0ddd1ea3f54f75e4d9b6c6cf0') version('3.0.1', sha256='a506d4d83b8973829e68787d8d721199523ce7ec73e7594e93333c214c2c12bd') root_cmakelists_dir = 'src' generator = "Ninja" variant('gui', default=True, description='Enable VisIt\'s GUI') variant('osmesa', default=False, description='Use OSMesa for off-screen CPU rendering') variant('adios2', default=True, description='Enable ADIOS2 file format') variant('hdf5', default=True, description='Enable HDF5 file format') variant('silo', default=True, description='Enable Silo file format') variant('python', default=True, description='Enable Python support') variant('mpi', default=True, description='Enable parallel engine') patch('spack-changes-3.1.patch', when="@3.1.0:,develop") patch('spack-changes-3.0.1.patch', when="@3.0.1") patch('nonframework-qwt.patch', when='^qt~framework platform=darwin') patch('parallel-hdf5.patch', when='+hdf5+mpi') # Exactly one of 'gui' or 'osmesa' has to be enabled conflicts('+gui', when='+osmesa') ############################################# # Full List of dependencies from build_visit ############################################# # cyrush note: # I added these here to give folks details # to help eventually build up to full # support for visit ############################################# # ===================================== # 
core: # ===================================== # cmake (build) # vtk # qt # qwt # python # mpi # # ===================================== # rendering (optional): # ===================================== # icet # vtk-m # vtk-h # llvm # mesagl # osmesa # tbb # embree # ispc # ospray # # ===================================== # python modules: # ===================================== # numpy # pillow # mpi4py # seedme # sphinx (build, docs) # sphinx rtd theme (build, docs) # pyqt (visit support deprecated) # pyside (note: we want pyside 2) # # ===================================== # testing related: # ===================================== # p7zip (build, test) # # ===================================== # io libs: # ===================================== # adios # adios2 # advio # boost # boxlib # cfitsio # cgns # conduit # damaris # fastbit # fastquery # gdal # h5part # hdf4 # hdf5 # mdsplus # mfem # mili # moab # mxml # nektarpp # netcdf # openexr # pidx # silo # stripack # szip # tbb # uintah # xdmf # xercesc # xsd # zlib # # ===================================== depends_on('cmake@3.14.7:', type='build') depends_on('ninja', type='build') depends_on('mpi', when='+mpi') # VTK flavors depends_on('vtk@8.1:8 +opengl2') depends_on('vtk +osmesa', when='+osmesa') depends_on('vtk +qt', when='+gui') depends_on('vtk +python', when='+python') depends_on('vtk +mpi', when='+mpi') depends_on('vtk ~mpi', when='~mpi') # Necessary VTK patches depends_on('vtk', patches=[patch('vtk_compiler_visibility.patch')]) depends_on('vtk', patches=[patch('vtk_rendering_opengl2_x11.patch')], when='~osmesa platform=linux') depends_on('vtk', patches=[patch('vtk_wrapping_python_x11.patch')], when='+python') depends_on('glu', when='~osmesa') depends_on('mesa-glu+osmesa', when='+osmesa') # VisIt doesn't work with later versions of qt. depends_on('qt+gui+opengl@5:5.14', when='+gui') depends_on('qwt', when='+gui') # python@3.8 doesn't work with VisIt. 
depends_on('python@3.2:3.7', when='+python') extends('python', when='+python') # VisIt uses the hdf5 1.8 api # set the API version later on down in setup_build_environment depends_on('hdf5@1.8:', when='+hdf5') depends_on('hdf5+mpi', when='+hdf5+mpi') depends_on('hdf5~mpi', when='+hdf5~mpi') # VisIt uses Silo's 'ghost zone' data structures, which are only available # in v4.10+ releases: https://wci.llnl.gov/simulation/computer-codes/silo/releases/release-notes-4.10 depends_on('silo@4.10: +shared', when='+silo') depends_on('silo+hdf5', when='+silo+hdf5') depends_on('silo~hdf5', when='+silo~hdf5') depends_on('silo+mpi', when='+silo+mpi') depends_on('silo~mpi', when='+silo~mpi') depends_on('adios2@2.6:', when='+adios2') depends_on('adios2+hdf5', when='+adios2+hdf5') depends_on('adios2~hdf5', when='+adios2~hdf5') depends_on('adios2+mpi', when='+adios2+mpi') depends_on('adios2~mpi', when='+adios2~mpi') depends_on('adios2+python', when='+adios2+python') depends_on('adios2~python', when='+adios2~python') depends_on('zlib') @when('@3:,develop') def patch(self): # Some of VTK's targets don't create explicit libraries, so there is no # 'vtktiff'. Instead, replace with the library variable defined from # VTK's module flies (e.g. lib/cmake/vtk-8.1/Modules/vtktiff.cmake) for filename in find('src', 'CMakeLists.txt'): filter_file(r'\bvtk(tiff|jpeg|png)', r'${vtk\1_LIBRARIES}', filename) def flag_handler(self, name, flags): if name in ('cflags', 'cxxflags'): # NOTE: This is necessary in order to allow VisIt to compile a couple # of lines of code with 'const char*' to/from 'char*' conversions. 
if '@3:%gcc' in self.spec: flags.append('-fpermissive') # VisIt still uses the hdf5 1.8 api if '+hdf5' in self.spec and '@1.10:' in self.spec['hdf5']: flags.append('-DH5_USE_18_API') return (flags, None, None) def cmake_args(self): spec = self.spec args = [ self.define('CMAKE_POSITION_INDEPENDENT_CODE', True), self.define('VTK_MAJOR_VERSION', spec['vtk'].version[0]), self.define('VTK_MINOR_VERSION', spec['vtk'].version[1]), self.define('VISIT_VTK_DIR', spec['vtk'].prefix), self.define('VISIT_ZLIB_DIR', spec['zlib'].prefix), self.define('VISIT_USE_GLEW', False), self.define('VISIT_CONFIG_SITE', 'NONE'), self.define('VISIT_INSTALL_THIRD_PARTY', True), ] if '@3.1: platform=darwin' in spec: args.append(self.define('FIXUP_OSX', False)) if '+python' in spec: args.extend([ self.define('VISIT_PYTHON_FILTERS', True), self.define('VISIT_PYTHON_SCRIPTING', True), self.define('PYTHON_DIR', spec['python'].home), ]) else: args.extend([ self.define('VISIT_PYTHON_FILTERS', False), self.define('VISIT_PYTHON_SCRIPTING', False), ]) if '+gui' in spec: qt_bin = spec['qt'].prefix.bin qmake_exe = os.path.join(qt_bin, 'qmake') args.extend([ self.define('VISIT_SERVER_COMPONENTS_ONLY', False), self.define('VISIT_ENGINE_ONLY', False), self.define('VISIT_LOC_QMAKE_EXE', qmake_exe), self.define('VISIT_QT_DIR', spec['qt'].prefix), self.define('VISIT_QWT_DIR', spec['qwt'].prefix), ]) else: args.extend([ self.define('VISIT_SERVER_COMPONENTS_ONLY', True), self.define('VISIT_ENGINE_ONLY', True), ]) # No idea why this is actually needed if '^mesa' in spec: args.append(self.define('VISIT_MESAGL_DIR', spec['mesa'].prefix)) if '+llvm' in spec['mesa']: args.append(self.define('VISIT_LLVM_DIR', spec['libllvm'].prefix)) if '+hdf5' in spec: args.append(self.define('VISIT_HDF5_DIR', spec['hdf5'].prefix)) if '+mpi' in spec and '+mpi' in spec['hdf5']: args.append(self.define('VISIT_HDF5_MPI_DIR', spec['hdf5'].prefix)) if '+silo' in spec: args.append(self.define('VISIT_SILO_DIR', spec['silo'].prefix)) if 
'+mpi' in spec: args.extend([ self.define('VISIT_PARALLEL', True), self.define('VISIT_MPI_COMPILER', spec['mpi'].mpicxx), ]) else: args.append(self.define('VISIT_PARALLEL', False)) return args # https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=executables#making-a-package-discoverable-with-spack-external-find # Here we are only able to determine the latest version # despite VisIt may have multiple versions @classmethod def determine_version(cls, exe): output = Executable(exe)('-version', output=str, error=str) match = re.search(r'\s*(\d[\d\.]+)\.', output) return match.group(1) if match else None
var/spack/repos/builtin/packages/visit/package.py
11,997
VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool. See comments in VisIt's package.py for tips about building VisIt with spack. Building VisIt with Spack is still experimental and many standard features are likely disabled LINUX------------------------------------------------------------------- spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 LINUX-W/O-OPENGL-------------------------------------------------------- spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \ ^mesa+opengl MACOS------------------------------------------------------------------- spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \ ^qt~framework Copyright 2013-2022 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) Suggestions for building: cyrush note: Out of the box, VisIt's python 2 requirement will cause spack spec constraint errors due Qt + Mesa build dependencies. You can avoid this using: linux: spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 linux w/o opengl: (add mesa as opengl if system lacks system opengl ) spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \ ^mesa+opengl macOS: spack install visit ^python+shared ^glib@2.56.3 ^py-setuptools@44.1.0 \ ^qt~framework Rpath issues undermine qwt (not qt) when a build as a framework VisIt's osxfixup resolves this for us in other cases, but we can't use osxfixup with spack b/c it will undermine other libs. Even with these changes, VisIt's Python CLI does not work on macOS, there is a linking issue related to OpenSSL. 
(dyld: Symbol not found: _GENERAL_NAME_free - which comes from OpenSSL) Exactly one of 'gui' or 'osmesa' has to be enabled Full List of dependencies from build_visit cyrush note: I added these here to give folks details to help eventually build up to full support for visit ===================================== core: ===================================== cmake (build) vtk qt qwt python mpi ===================================== rendering (optional): ===================================== icet vtk-m vtk-h llvm mesagl osmesa tbb embree ispc ospray ===================================== python modules: ===================================== numpy pillow mpi4py seedme sphinx (build, docs) sphinx rtd theme (build, docs) pyqt (visit support deprecated) pyside (note: we want pyside 2) ===================================== testing related: ===================================== p7zip (build, test) ===================================== io libs: ===================================== adios adios2 advio boost boxlib cfitsio cgns conduit damaris fastbit fastquery gdal h5part hdf4 hdf5 mdsplus mfem mili moab mxml nektarpp netcdf openexr pidx silo stripack szip tbb uintah xdmf xercesc xsd zlib ===================================== VTK flavors Necessary VTK patches VisIt doesn't work with later versions of qt. python@3.8 doesn't work with VisIt. VisIt uses the hdf5 1.8 api set the API version later on down in setup_build_environment VisIt uses Silo's 'ghost zone' data structures, which are only available in v4.10+ releases: https://wci.llnl.gov/simulation/computer-codes/silo/releases/release-notes-4.10 Some of VTK's targets don't create explicit libraries, so there is no 'vtktiff'. Instead, replace with the library variable defined from VTK's module flies (e.g. lib/cmake/vtk-8.1/Modules/vtktiff.cmake) NOTE: This is necessary in order to allow VisIt to compile a couple of lines of code with 'const char*' to/from 'char*' conversions. 
VisIt still uses the hdf5 1.8 api No idea why this is actually needed https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=executablesmaking-a-package-discoverable-with-spack-external-find Here we are only able to determine the latest version despite VisIt may have multiple versions
4,024
en
0.733623
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('data', '0002_auto_20141114_1935'), ] operations = [ migrations.RemoveField( model_name='registryentry', name='fixity_algorithm', ), migrations.RemoveField( model_name='registryentry', name='fixity_value', ), migrations.RemoveField( model_name='registryentry', name='last_fixity_date', ), migrations.RemoveField( model_name='registryentry', name='published', ), ]
dpnode/dpn/data/migrations/0003_auto_20141117_2011.py
715
-*- coding: utf-8 -*-
21
en
0.767281
import math import numpy as np from enum import IntEnum class Mode(IntEnum): CUSTOM = 0 EQUAL = 1 GAUSS = 2 GAUSS_SYM = 3 PYRAMID = 4 PYRAMID_SYM = 5 SIVEROO_1 = 6 SIVEROO_2 = 7 #This function will return an list of value, like below: # [0,1,2,3,...,n] -> [a,...,b] def scaleRange(n: int, a: int, b: int): return [(x*(b-a)/(n-1))+a for x in range(0,n)] def equal(n: int): return [1/n]*n def gauss(n: int): r = range(n,0,-1) val = [math.exp(-(2.0*x/n)**2) for x in r] val = val/np.sum(val) return val def gauss_sym(n: int): n = n/2 r = range(int(n),-math.ceil(n),-1) val = ([math.exp(-(2.0*x/(n))**2) for x in r]) val = val/np.sum(val) return val def pyramid(n: int): r = range(1,n+1) val = [x/n for x in r] val = val/np.sum(val) return val def pyramid_sym(n: int): r = range(0,n) val = [(n/2)-abs(x-(n-1)/2) for x in r] val = val/np.sum(val) return val def siveroo1(n: int): r = scaleRange(n,-3,0.1) val = [math.floor(3*math.exp(-(x/1.9)**2))/3+0.1 for x in r] val = val/np.sum(val) return val # this function will stretch the given array (w) to a specific length (n) # example : n = 10, w = [1,2] # result : val = [1,1,1,1,1,2,2,2,2,2] , flip it, and then normalize it so its sum is equal to 1 def stretch(n: int, w: int): r = scaleRange(n,0,len(w)-0.1) val = [] idx = [math.floor(x) for x in r] for x in range(0,n): index = int(idx[x]) val.append(w[index]) val = val/np.sum(val) return val def null(n: int): return [0]*n def get_weight(mode: Mode, count: int): if count == 1: return [1.0] else: return { Mode.EQUAL : equal(count), Mode.GAUSS : gauss(count), Mode.GAUSS_SYM : gauss_sym(count), Mode.PYRAMID : pyramid(count), Mode.PYRAMID_SYM : pyramid_sym(count), Mode.SIVEROO_1 : siveroo1(count), Mode.SIVEROO_2 : stretch(count,[1,3,3,2,2]) }.get(mode, [1, 0]) # fallback to [1,0] if fucked up def modeName(mode: Mode): return { Mode.EQUAL : "[1] Equal", Mode.GAUSS : "[2] Gaussian Asymmetric", Mode.GAUSS_SYM : "[3] Gaussian Symmetric", Mode.PYRAMID : "[4] Pyramid Asymmetric", Mode.PYRAMID_SYM : 
"[5] Pyramid Symmetric", Mode.SIVEROO_1 : "[6] Siveroo's Preset I", Mode.SIVEROO_2 : "[7] Siveroo's Preset II" }[mode]
weights.py
2,657
This function will return an list of value, like below: [0,1,2,3,...,n] -> [a,...,b] this function will stretch the given array (w) to a specific length (n) example : n = 10, w = [1,2] result : val = [1,1,1,1,1,2,2,2,2,2] , flip it, and then normalize it so its sum is equal to 1 fallback to [1,0] if fucked up
310
en
0.725512
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkiot.endpoint import endpoint_data class CreateRuleRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateRule') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_Select(self): return self.get_query_params().get('Select') def set_Select(self,Select): self.add_query_param('Select',Select) def get_RuleDesc(self): return self.get_query_params().get('RuleDesc') def set_RuleDesc(self,RuleDesc): self.add_query_param('RuleDesc',RuleDesc) def get_ShortTopic(self): return self.get_query_params().get('ShortTopic') def set_ShortTopic(self,ShortTopic): self.add_query_param('ShortTopic',ShortTopic) def get_ResourceGroupId(self): return self.get_query_params().get('ResourceGroupId') def set_ResourceGroupId(self,ResourceGroupId): self.add_query_param('ResourceGroupId',ResourceGroupId) def get_DataType(self): return self.get_query_params().get('DataType') def set_DataType(self,DataType): self.add_query_param('DataType',DataType) 
def get_IotInstanceId(self): return self.get_query_params().get('IotInstanceId') def set_IotInstanceId(self,IotInstanceId): self.add_query_param('IotInstanceId',IotInstanceId) def get_Where(self): return self.get_query_params().get('Where') def set_Where(self,Where): self.add_query_param('Where',Where) def get_TopicType(self): return self.get_query_params().get('TopicType') def set_TopicType(self,TopicType): self.add_query_param('TopicType',TopicType) def get_ProductKey(self): return self.get_query_params().get('ProductKey') def set_ProductKey(self,ProductKey): self.add_query_param('ProductKey',ProductKey) def get_Name(self): return self.get_query_params().get('Name') def set_Name(self,Name): self.add_query_param('Name',Name) def get_Topic(self): return self.get_query_params().get('Topic') def set_Topic(self,Topic): self.add_query_param('Topic',Topic)
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateRuleRequest.py
3,042
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
754
en
0.883564
#!/usr/bin/env python3 import uuid import random import datetime from faker import Factory fake = Factory.create() num_people = 1000 last_jump_start = datetime.datetime(2008, 9, 1) last_jump_end = datetime.datetime(2016, 8, 1) print('COPY members (uuid, name, email, phone_number, last_jump, created_at, updated_at) FROM stdin;') for i in range(0, num_people): member_uuid = str(uuid.uuid4()) name = fake.name() email = fake.email() phone_number = '+447' + str(random.randrange(100000000, 999999999, 1)) last_jump = fake.date_time_between_dates(datetime_start = last_jump_start, datetime_end = last_jump_end).strftime('%Y-%m-%d') created_at = fake.date_time_between_dates(datetime_start = last_jump_start, datetime_end = last_jump_end) updated_at = fake.date_time_between_dates(datetime_start = created_at, datetime_end = last_jump_end).strftime('%Y-%m-%d %H:%M:%S') print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (member_uuid, name, email, phone_number, last_jump, created_at.strftime('%Y-%m-%d %H:%M:%S'), updated_at)) print('\\.')
test-data/members.py
1,048
!/usr/bin/env python3
21
fr
0.448822
import os from selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess from selfdrive.hardware import EON, TICI, PC from common.op_params import opParams WEBCAM = os.getenv("USE_WEBCAM") is not None procs = [ DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"), # due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption NativeProcess("camerad", "selfdrive/camerad", ["./camerad"], unkillable=True, driverview=True), NativeProcess("clocksd", "selfdrive/clocksd", ["./clocksd"]), NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM), driverview=True), NativeProcess("logcatd", "selfdrive/logcatd", ["./logcatd"]), NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"]), NativeProcess("modeld", "selfdrive/modeld", ["./modeld"]), NativeProcess("proclogd", "selfdrive/proclogd", ["./proclogd"]), NativeProcess("sensord", "selfdrive/sensord", ["./sensord"], enabled=not PC, persistent=EON, sigkill=EON), NativeProcess("ubloxd", "selfdrive/locationd", ["./ubloxd"], enabled=(not PC or WEBCAM)), NativeProcess("ui", "selfdrive/ui", ["./ui"], persistent=True, watchdog_max_dt=(5 if TICI else None)), NativeProcess("soundd", "selfdrive/ui", ["./soundd"]), NativeProcess("locationd", "selfdrive/locationd", ["./locationd"]), NativeProcess("boardd", "selfdrive/boardd", ["./boardd"], enabled=False), PythonProcess("calibrationd", "selfdrive.locationd.calibrationd"), PythonProcess("controlsd", "selfdrive.controls.controlsd"), PythonProcess("deleter", "selfdrive.loggerd.deleter", persistent=True), PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM), driverview=True), PythonProcess("logmessaged", "selfdrive.logmessaged", persistent=True), PythonProcess("pandad", "selfdrive.pandad", persistent=True), PythonProcess("paramsd", "selfdrive.locationd.paramsd"), PythonProcess("plannerd", "selfdrive.controls.plannerd"), 
PythonProcess("radard", "selfdrive.controls.radard"), PythonProcess("rtshield", "selfdrive.rtshield", enabled=EON), PythonProcess("thermald", "selfdrive.thermald.thermald", persistent=True), PythonProcess("timezoned", "selfdrive.timezoned", enabled=TICI, persistent=True), PythonProcess("tombstoned", "selfdrive.tombstoned", enabled=not PC, persistent=True), PythonProcess("uploader", "selfdrive.loggerd.uploader", persistent=True), ] if not opParams().get('update_behavior').lower().strip() == 'off' or os.path.exists('/data/no_ota_updates'): procs.append(PythonProcess("updated", "selfdrive.updated", enabled=not PC, persistent=True)) managed_processes = {p.name: p for p in procs}
selfdrive/manager/process_config.py
2,739
due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
85
en
0.616132
#!/usr/bin/env python # -*- coding: utf-8 -*- # ======================================= # File Name: test_ADMM.py # Purpose : test ADMM solver for primal # problem and dual problem # ======================================= from utils import get_params from ADMM_primal import ADMM_primal from ADMM_dual import ADMM_dual import numpy as np import argparse import time import sys """Parser """ parser = argparse.ArgumentParser() parser.add_argument('--n', type=int, default=64) parser.add_argument('--dataset', type=str, choices=['random', 'caffarelli', 'ellipse', 'DOTmark'], default='random') parser.add_argument('--imageclass', type=str, default='WhiteNoise') parser.add_argument('--method', type=str, choices=['primal', 'dual'], default='primal') parser.add_argument('--iters', type=int, default=10000) parser.add_argument('--alpha', type=float, default=1.618) parser.add_argument('--rho', type=float, default=1024) args = parser.parse_args() def main(): """Main routine """ print("\nTesting ADMM") print("====================") print("m = n : ", args.n) print("dataset: ", args.dataset) if args.dataset == 'DOTmark': print("class : ", args.imageclass) print("method : ", args.method) print("====================") mu, nu, c = get_params(args.n, args.dataset, args.imageclass) start = time.time() if args.method == 'primal': ADMM_primal(mu, nu, c, args.iters, args.rho, args.alpha) elif args.method == 'dual': ADMM_dual(mu, nu, c, args.iters, args.rho, args.alpha) t = time.time() - start print('time = %.5e' % t) if __name__ == '__main__': try: main() except KeyboardInterrupt: print (" Ctrl+C pressed...") sys.exit(1)
test_ADMM.py
1,765
Main routine !/usr/bin/env python -*- coding: utf-8 -*- ======================================= File Name: test_ADMM.py Purpose : test ADMM solver for primal problem and dual problem =======================================
240
en
0.610898
# -*- coding: utf-8 -*- """ solutions_by_text.sbt_token_generator ~~~~~~~~~~~~ This module handles security token generation. """ # @Author: sijanonly # @Date: 2018-03-19 10:57:26 # @Last Modified by: sijanonly # @Last Modified time: 2018-03-19 14:51:07 import json from urllib import parse import requests from .handle_exceptions import CustomException _base_url = 'https://{}.solutionsbytext.com/SBT.App.SetUp/RSServices/' def create_security_token(api_key, stage): """ Generates a security token for SBT API access. Args: api_key (string): API_KEY value provided by solutionsbytext stage (string): STAGE values (test or ui) Returns: string: SecurityToken returns by LoginAPIService Raises: CustomException: Raises while error during GET request. """ url = ''.join( [ _base_url.format(stage), 'LoginAPIService.svc/AuthenticateAPIKey?', parse.urlencode({'APIKey': api_key}) ] ) response_data = json.loads(requests.get(url).text) if response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1402: raise CustomException( 'Error in generating security key.') if response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1401: raise CustomException( 'SecurityToken generation is failed.') return response_data['AuthenticateAPIKeyResult'].get('SecurityToken')
solutions_by_text/sbt_token_generator.py
1,456
Generates a security token for SBT API access. Args: api_key (string): API_KEY value provided by solutionsbytext stage (string): STAGE values (test or ui) Returns: string: SecurityToken returns by LoginAPIService Raises: CustomException: Raises while error during GET request. solutions_by_text.sbt_token_generator ~~~~~~~~~~~~ This module handles security token generation. -*- coding: utf-8 -*- @Author: sijanonly @Date: 2018-03-19 10:57:26 @Last Modified by: sijanonly @Last Modified time: 2018-03-19 14:51:07
537
en
0.558515
# -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.urls import path from . import views urlpatterns = [ path("", views.DebugViewSet.as_view({"get": "list"}), name="debug.list_debug"), path("<str:id>/", views.DebugViewSet.as_view({"get": "retrieve"}), name="debug.detail"), ]
saas/backend/debug/urls.py
978
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*- coding: utf-8 -*-
707
en
0.86388
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Youngseokcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the abandontransaction RPC. The abandontransaction RPC marks a transaction and all its in-wallet descendants as abandoned which allows their inputs to be respent. It can be used to replace "stuck" or evicted transactions. It only works on transactions which are not included in a block and are not currently in the mempool. It has no effect on transactions which are already conflicted or abandoned. """ from test_framework.test_framework import YoungseokcoinTestFramework from test_framework.util import * class AbandonConflictTest(YoungseokcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-minrelaytxfee=0.00001"], []] def run_test(self): self.nodes[1].generate(100) sync_blocks(self.nodes) balance = self.nodes[0].getbalance() txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) sync_mempools(self.nodes) self.nodes[1].generate(1) sync_blocks(self.nodes) newbalance = self.nodes[0].getbalance() assert(balance - newbalance < Decimal("0.001")) #no more than fees lost balance = newbalance # Disconnect nodes so node0's transactions don't get into node1's mempool disconnect_nodes(self.nodes[0], 1) # Identify the 10ysc outputs nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10")) nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) inputs =[] # spend 10ysc outputs from txA and txB 
inputs.append({"txid":txA, "vout":nA}) inputs.append({"txid":txB, "vout":nB}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") outputs[self.nodes[1].getnewaddress()] = Decimal("5") signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) # Identify the 14.99998ysc output nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998")) #Create a child tx spending AB1 and C inputs = [] inputs.append({"txid":txAB1, "vout":nAB}) inputs.append({"txid":txC, "vout":nC}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) # In mempool txs from self should increase balance from change newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996")) balance = newbalance # Restart the node with a higher min relay fee so the parent tx is no longer in mempool # TODO: redo with eviction self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) # Verify txs no longer in either node's mempool assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(len(self.nodes[1].getrawmempool()), 0) # Not in mempool txs from self should only reduce balance # inputs are still spent, but change not received newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) # Unconfirmed received funds that are not in mempool, also shouldn't show # up in unconfirmed balance unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance() assert_equal(unconfbalance, newbalance) # Also shouldn't show up in listunspent assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]) balance = newbalance # Abandon 
original transaction and verify inputs are available again # including that the child tx was also abandoned self.nodes[0].abandontransaction(txAB1) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("30")) balance = newbalance # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.00001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(self.nodes[0].getbalance(), balance) # But if its received again then it is unabandoned # And since now in mempool, the change is available # But its child tx remains abandoned self.nodes[0].sendrawtransaction(signed["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998")) balance = newbalance # Send child tx again so its unabandoned self.nodes[0].sendrawtransaction(signed2["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) balance = newbalance # Remove using high relay fee again self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) balance = newbalance # Create a double spend of AB1 by spending again from only A's 10 output # Mine double spend from node 1 inputs =[] inputs.append({"txid":txA, "vout":nA}) outputs = {} outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") tx = self.nodes[0].createrawtransaction(inputs, outputs) signed = self.nodes[0].signrawtransaction(tx) self.nodes[1].sendrawtransaction(signed["hex"]) self.nodes[1].generate(1) connect_nodes(self.nodes[0], 1) sync_blocks(self.nodes) # Verify that B and C's 10 YSC outputs are available for spending again because AB1 is now conflicted newbalance = self.nodes[0].getbalance() 
assert_equal(newbalance, balance + Decimal("20")) balance = newbalance # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 YSC output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() #assert_equal(newbalance, balance - Decimal("10")) self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315") self.log.info(str(balance) + " -> " + str(newbalance) + " ?") if __name__ == '__main__': AbandonConflictTest().main()
test/functional/abandonconflict.py
7,732
Test the abandontransaction RPC. The abandontransaction RPC marks a transaction and all its in-wallet descendants as abandoned which allows their inputs to be respent. It can be used to replace "stuck" or evicted transactions. It only works on transactions which are not included in a block and are not currently in the mempool. It has no effect on transactions which are already conflicted or abandoned. !/usr/bin/env python3 Copyright (c) 2014-2016 The Youngseokcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.no more than fees lost Disconnect nodes so node0's transactions don't get into node1's mempool Identify the 10ysc outputs spend 10ysc outputs from txA and txB Identify the 14.99998ysc outputCreate a child tx spending AB1 and C In mempool txs from self should increase balance from change Restart the node with a higher min relay fee so the parent tx is no longer in mempool TODO: redo with eviction Verify txs no longer in either node's mempool Not in mempool txs from self should only reduce balance inputs are still spent, but change not received Unconfirmed received funds that are not in mempool, also shouldn't show up in unconfirmed balance Also shouldn't show up in listunspent Abandon original transaction and verify inputs are available again including that the child tx was also abandoned Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned But if its received again then it is unabandoned And since now in mempool, the change is available But its child tx remains abandoned Send child tx again so its unabandoned Remove using high relay fee again Create a double spend of AB1 by spending again from only A's 10 output Mine double spend from node 1 Verify that B and C's 10 YSC outputs are available for spending again because AB1 is now conflicted There is currently a minor bug around this and so this test doesn't 
work. See Issue 7315 Invalidate the block with the double spend and B's 10 YSC output should no longer be available Don't think C's should eitherassert_equal(newbalance, balance - Decimal("10"))
2,195
en
0.92506
#You can either add the python package path. #sys.path.append(r'/mnt/e/GitHub_Design/Metalprot') from metalprot.search import search_selfcenter from metalprot.basic import filter import pickle import time import prody as pr ''' python /mnt/e/GitHub_Design/Metalprot/scrips/search_selfcenter/run_selfcenter_search.py ''' start_time = time.time() query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/20211013_selfcenter/pickle_noCYS/' with open(query_dir + 'all_metal_vdm.pkl', 'rb') as f: query_all_metal = pickle.load(f) with open(query_dir + 'AAMetalPhiPsi.pkl', 'rb') as f: all_querys = pickle.load(f) with open(query_dir + 'cluster_centroid_dict.pkl', 'rb') as f: cluster_centroid_dict = pickle.load(f) print(len(all_querys)) ### run Search_struct workdir = '/mnt/e/DesignData/ligands/LigandBB/MID1sc10/' outdir = workdir + 'output_selfcenter/' target_path = workdir + '5od1_zn.pdb' win_filter = [35, 61, 65] # workdir = '/mnt/e/DesignData/ligands/LigandBB/6dwv/' # outdir = workdir + 'output_selfcenter/' # target_path = workdir + '6dwv_core.pdb' # win_filter = [] # workdir = '/mnt/e/DesignData/ligands/LigandBB/8adh/' # outdir = workdir + 'output_selfcenter/' # target_path = workdir + '1989_8adh_ZN_1.pdb' # win_filter = [] # workdir = '/mnt/e/DesignData/ligands/LigandBB/3f7u_lig/' # outdir = workdir + 'output_selfcenter/' # target_path = workdir + '3f7u1aa.pdb' # win_filter = [94, 96, 119] # workdir = '/mnt/e/DesignData/ligands/LigandBB/2afw_lig/' # outdir = workdir + 'output_selfcenter/' # target_path = workdir + '2afw_aa.pdb' # win_filter = [159, 202, 330] # workdir = '/mnt/e/DesignData/ligands/LigandBB/huong/' # outdir = workdir + 'output_selfcenter/' # target_path = workdir + 'aQ4x_aa.pdb' # win_filter = ['I-3', 'I-6', 'I-10', 'I-13', 'I-17', 'I-20', # 'J-3', 'J-6', 'J-7', 'J-10', 'J-13', 'J-14', 'J-17', 'J-20', 'J-21', # 'K-6', 'K-10', 'K-13', 'K-17', 'K-20', # 'L-3', 'L-6', 'L-7', 'L-10', 'L-13', 'L-14', 'L-17', 'L-20', 'L-21', 'L-24', # 'M-3', 'M-6', 
'M-10', 'M-13', 'M-17', 'M-20', # 'N-3', 'N-6', 'N-7', 'N-10', 'N-13', 'N-14', 'N-17', 'N-20', 'N-21' # ] geometry_path = None #geometry_path = workdir + 'tetrahydral_geo.pdb' metal_metal_dist = 0.3 num_contact_vdms = [3] allowed_aa_combinations = [['H', 'H', 'H']] allowed_aa_combinations = [] _filter = filter.Search_filter(filter_abple = False, filter_phipsi = True, max_phipsi_val = 25, filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20, after_search_filter_geometry = True, filter_based_geometry_structure = False, angle_tol = 15, aa_aa_tol = 0.3, aa_metal_tol = 0.2, pair_angle_range = [85, 130], pair_aa_aa_dist_range = [2.8, 4], pair_metal_aa_dist_range = None, after_search_filter_qt_clash = True, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2, after_search_open_site_clash = True, open_site_dist = 3.0, write_filtered_result = False, selfcenter_filter_member_phipsi=True) ss = search_selfcenter.Search_selfcenter(target_path, outdir, all_querys, cluster_centroid_dict, query_all_metal, num_contact_vdms, metal_metal_dist, win_filter, validateOriginStruct = True, search_filter= _filter, geometry_path = None, density_radius = 0.6, allowed_aa_combinations = allowed_aa_combinations, output_wincomb_overlap=True) #ss.run_selfcenter_search() search_selfcenter.run_search_selfcenter(ss) end_time = time.time() print(end_time - start_time, "seconds")
scrips/search_selfcenter/run_selfcenter_search.py
3,575
You can either add the python package path.sys.path.append(r'/mnt/e/GitHub_Design/Metalprot') run Search_struct workdir = '/mnt/e/DesignData/ligands/LigandBB/6dwv/' outdir = workdir + 'output_selfcenter/' target_path = workdir + '6dwv_core.pdb' win_filter = [] workdir = '/mnt/e/DesignData/ligands/LigandBB/8adh/' outdir = workdir + 'output_selfcenter/' target_path = workdir + '1989_8adh_ZN_1.pdb' win_filter = [] workdir = '/mnt/e/DesignData/ligands/LigandBB/3f7u_lig/' outdir = workdir + 'output_selfcenter/' target_path = workdir + '3f7u1aa.pdb' win_filter = [94, 96, 119] workdir = '/mnt/e/DesignData/ligands/LigandBB/2afw_lig/' outdir = workdir + 'output_selfcenter/' target_path = workdir + '2afw_aa.pdb' win_filter = [159, 202, 330] workdir = '/mnt/e/DesignData/ligands/LigandBB/huong/' outdir = workdir + 'output_selfcenter/' target_path = workdir + 'aQ4x_aa.pdb' win_filter = ['I-3', 'I-6', 'I-10', 'I-13', 'I-17', 'I-20', 'J-3', 'J-6', 'J-7', 'J-10', 'J-13', 'J-14', 'J-17', 'J-20', 'J-21', 'K-6', 'K-10', 'K-13', 'K-17', 'K-20', 'L-3', 'L-6', 'L-7', 'L-10', 'L-13', 'L-14', 'L-17', 'L-20', 'L-21', 'L-24', 'M-3', 'M-6', 'M-10', 'M-13', 'M-17', 'M-20', 'N-3', 'N-6', 'N-7', 'N-10', 'N-13', 'N-14', 'N-17', 'N-20', 'N-21' ]geometry_path = workdir + 'tetrahydral_geo.pdb'ss.run_selfcenter_search()
1,388
en
0.469983
# coding=utf-8 import logging import pytest from rasa_nlu.evaluate import ( is_token_within_entity, do_entities_overlap, merge_labels, remove_duckling_entities, remove_empty_intent_examples, get_entity_extractors, get_duckling_dimensions, known_duckling_dimensions, find_component, remove_duckling_extractors, drop_intents_below_freq, run_cv_evaluation, substitute_labels, IntentEvaluationResult, evaluate_intents, evaluate_entities) from rasa_nlu.evaluate import does_token_cross_borders from rasa_nlu.evaluate import align_entity_predictions from rasa_nlu.evaluate import determine_intersection from rasa_nlu.evaluate import determine_token_labels from rasa_nlu.config import RasaNLUModelConfig from rasa_nlu.tokenizers import Token from rasa_nlu import utils import json import os from rasa_nlu import training_data, config from tests import utilities logging.basicConfig(level="DEBUG") @pytest.fixture(scope="session") def duckling_interpreter(component_builder, tmpdir_factory): conf = RasaNLUModelConfig({"pipeline": [{"name": "ner_duckling_http"}]}) return utilities.interpreter_for( component_builder, data="./data/examples/rasa/demo-rasa.json", path=tmpdir_factory.mktemp("projects").strpath, config=conf) # Chinese Example # "对面食过敏" -> To be allergic to wheat-based food CH_wrong_segmentation = [Token("对面", 0), Token("食", 2), Token("过敏", 3)] # opposite, food, allergy CH_correct_segmentation = [Token("对", 0), Token("面食", 1), Token("过敏", 3)] # towards, wheat-based food, allergy CH_wrong_entity = { "start": 0, "end": 2, "value": "对面", "entity": "direction" } CH_correct_entity = { "start": 1, "end": 3, "value": "面食", "entity": "food_type" } # EN example # "Hey Robot, I would like to eat pizza near Alexanderplatz tonight" EN_indices = [0, 4, 9, 11, 13, 19, 24, 27, 31, 37, 42, 57] EN_tokens = ["Hey", "Robot", ",", "I", "would", "like", "to", "eat", "pizza", "near", "Alexanderplatz", "tonight"] EN_tokens = [Token(t, i) for t, i in zip(EN_tokens, EN_indices)] EN_targets = [ { 
"start": 31, "end": 36, "value": "pizza", "entity": "food" }, { "start": 37, "end": 56, "value": "near Alexanderplatz", "entity": "location" }, { "start": 57, "end": 64, "value": "tonight", "entity": "datetime" } ] EN_predicted = [ { "start": 4, "end": 9, "value": "Robot", "entity": "person", "extractor": "A" }, { "start": 31, "end": 36, "value": "pizza", "entity": "food", "extractor": "A" }, { "start": 42, "end": 56, "value": "Alexanderplatz", "entity": "location", "extractor": "A" }, { "start": 42, "end": 64, "value": "Alexanderplatz tonight", "entity": "movie", "extractor": "B" } ] def test_token_entity_intersection(): # included intsec = determine_intersection(CH_correct_segmentation[1], CH_correct_entity) assert intsec == len(CH_correct_segmentation[1].text) # completely outside intsec = determine_intersection(CH_correct_segmentation[2], CH_correct_entity) assert intsec == 0 # border crossing intsec = determine_intersection(CH_correct_segmentation[1], CH_wrong_entity) assert intsec == 1 def test_token_entity_boundaries(): # smaller and included assert is_token_within_entity(CH_wrong_segmentation[1], CH_correct_entity) assert not does_token_cross_borders(CH_wrong_segmentation[1], CH_correct_entity) # exact match assert is_token_within_entity(CH_correct_segmentation[1], CH_correct_entity) assert not does_token_cross_borders(CH_correct_segmentation[1], CH_correct_entity) # completely outside assert not is_token_within_entity(CH_correct_segmentation[0], CH_correct_entity) assert not does_token_cross_borders(CH_correct_segmentation[0], CH_correct_entity) # border crossing assert not is_token_within_entity(CH_wrong_segmentation[0], CH_correct_entity) assert does_token_cross_borders(CH_wrong_segmentation[0], CH_correct_entity) def test_entity_overlap(): assert do_entities_overlap([CH_correct_entity, CH_wrong_entity]) assert not do_entities_overlap(EN_targets) def test_determine_token_labels_throws_error(): with pytest.raises(ValueError): 
determine_token_labels(CH_correct_segmentation, [CH_correct_entity, CH_wrong_entity], ["ner_crf"]) def test_determine_token_labels_no_extractors(): determine_token_labels(CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], None) def test_determine_token_labels_with_extractors(): determine_token_labels(CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], ["A", "B"]) def test_label_merging(): aligned_predictions = [ {"target_labels": ["O", "O"], "extractor_labels": {"A": ["O", "O"]}}, {"target_labels": ["LOC", "O", "O"], "extractor_labels": {"A": ["O", "O", "O"]}} ] assert all(merge_labels(aligned_predictions) == ["O", "O", "LOC", "O", "O"]) assert all(merge_labels(aligned_predictions, "A") == ["O", "O", "O", "O", "O"]) def test_duckling_patching(): entities = [[ { "start": 37, "end": 56, "value": "near Alexanderplatz", "entity": "location", "extractor": "ner_crf" }, { "start": 57, "end": 64, "value": "tonight", "entity": "Time", "extractor": "ner_duckling_http" } ]] patched = [[ { "start": 37, "end": 56, "value": "near Alexanderplatz", "entity": "location", "extractor": "ner_crf" } ]] assert remove_duckling_entities(entities) == patched def test_drop_intents_below_freq(): td = training_data.load_data('data/examples/rasa/demo-rasa.json') clean_td = drop_intents_below_freq(td, 0) assert clean_td.intents == {'affirm', 'goodbye', 'greet', 'restaurant_search'} clean_td = drop_intents_below_freq(td, 10) assert clean_td.intents == {'affirm', 'restaurant_search'} def test_run_cv_evaluation(): td = training_data.load_data('data/examples/rasa/demo-rasa.json') nlu_config = config.load("sample_configs/config_spacy.yml") n_folds = 2 results, entity_results = run_cv_evaluation(td, n_folds, nlu_config) assert len(results.train["Accuracy"]) == n_folds assert len(results.train["Precision"]) == n_folds assert len(results.train["F1-score"]) == n_folds assert len(results.test["Accuracy"]) == n_folds assert len(results.test["Precision"]) == n_folds assert 
len(results.test["F1-score"]) == n_folds assert len(entity_results.train['ner_crf']["Accuracy"]) == n_folds assert len(entity_results.train['ner_crf']["Precision"]) == n_folds assert len(entity_results.train['ner_crf']["F1-score"]) == n_folds assert len(entity_results.test['ner_crf']["Accuracy"]) == n_folds assert len(entity_results.test['ner_crf']["Precision"]) == n_folds assert len(entity_results.test['ner_crf']["F1-score"]) == n_folds def test_intent_evaluation_report(tmpdir_factory): path = tmpdir_factory.mktemp("evaluation").strpath report_folder = os.path.join(path, "reports") report_filename = os.path.join(report_folder, "intent_report.json") utils.create_dir(report_folder) intent_results = [ IntentEvaluationResult("", "restaurant_search", "I am hungry", 0.12345), IntentEvaluationResult("greet", "greet", "hello", 0.98765)] result = evaluate_intents(intent_results, report_folder, successes_filename=None, errors_filename=None, confmat_filename=None, intent_hist_filename=None) report = json.loads(utils.read_file(report_filename)) greet_results = {"precision": 1.0, "recall": 1.0, "f1-score": 1.0, "support": 1} prediction = {'text': 'hello', 'intent': 'greet', 'predicted': 'greet', 'confidence': 0.98765} assert len(report.keys()) == 4 assert report["greet"] == greet_results assert result["predictions"][0] == prediction def test_entity_evaluation_report(tmpdir_factory): path = tmpdir_factory.mktemp("evaluation").strpath report_folder = os.path.join(path, "reports") mock_extractors = ["A", "B"] report_filename_a = os.path.join(report_folder, "A_report.json") report_filename_b = os.path.join(report_folder, "B_report.json") utils.create_dir(report_folder) result = evaluate_entities([EN_targets], [EN_predicted], [EN_tokens], mock_extractors, report_folder) report_a = json.loads(utils.read_file(report_filename_a)) report_b = json.loads(utils.read_file(report_filename_b)) assert len(report_a) == 8 assert report_a["datetime"]["support"] == 1.0 assert report_b["macro 
avg"]["recall"] == 0.2 assert result["A"]["accuracy"] == 0.75 def test_empty_intent_removal(): intent_results = [ IntentEvaluationResult("", "restaurant_search", "I am hungry", 0.12345), IntentEvaluationResult("greet", "greet", "hello", 0.98765) ] intent_results = remove_empty_intent_examples(intent_results) assert len(intent_results) == 1 assert intent_results[0].target == "greet" assert intent_results[0].prediction == "greet" assert intent_results[0].confidence == 0.98765 assert intent_results[0].message == "hello" def test_evaluate_entities_cv_empty_tokens(): mock_extractors = ["A", "B"] result = align_entity_predictions(EN_targets, EN_predicted, [], mock_extractors) assert result == { "target_labels": [], "extractor_labels": { "A": [], "B": [] } }, "Wrong entity prediction alignment" def test_evaluate_entities_cv(): mock_extractors = ["A", "B"] result = align_entity_predictions(EN_targets, EN_predicted, EN_tokens, mock_extractors) assert result == { "target_labels": ["O", "O", "O", "O", "O", "O", "O", "O", "food", "location", "location", "datetime"], "extractor_labels": { "A": ["O", "person", "O", "O", "O", "O", "O", "O", "food", "O", "location", "O"], "B": ["O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "movie", "movie"] } }, "Wrong entity prediction alignment" def test_get_entity_extractors(duckling_interpreter): assert get_entity_extractors(duckling_interpreter) == {"ner_duckling_http"} def test_get_duckling_dimensions(duckling_interpreter): dims = get_duckling_dimensions(duckling_interpreter, "ner_duckling_http") assert set(dims) == known_duckling_dimensions def test_find_component(duckling_interpreter): name = find_component(duckling_interpreter, "ner_duckling_http").name assert name == "ner_duckling_http" def test_remove_duckling_extractors(duckling_interpreter): target = set([]) patched = remove_duckling_extractors({"ner_duckling_http"}) assert patched == target def test_label_replacement(): original_labels = ["O", "location"] target_labels = 
["no_entity", "location"] assert substitute_labels(original_labels, "O", "no_entity") == target_labels
tests/base/test_evaluation.py
12,735
coding=utf-8 Chinese Example "对面食过敏" -> To be allergic to wheat-based food opposite, food, allergy towards, wheat-based food, allergy EN example "Hey Robot, I would like to eat pizza near Alexanderplatz tonight" included completely outside border crossing smaller and included exact match completely outside border crossing
323
en
0.956874
# coding: utf-8 """ OpenAPI Petstore This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import sys import unittest import petstore_api try: from petstore_api.model import child_all_of except ImportError: child_all_of = sys.modules[ 'petstore_api.model.child_all_of'] try: from petstore_api.model import parent except ImportError: parent = sys.modules[ 'petstore_api.model.parent'] from petstore_api.model.child import Child class TestChild(unittest.TestCase): """Child unit test stubs""" def setUp(self): pass def tearDown(self): pass def testChild(self): """Test Child This will fail because additional_properties_type is None in ChildAllOf and it must be defined as any type to allow in the property radio_waves which is not defined in ChildAllOf, it is defined in Grandparent """ # make an instance of Child, a composed schema model radio_waves = True tele_vision = True inter_net = True with self.assertRaises(petstore_api.exceptions.ApiValueError): child = Child( radio_waves=radio_waves, tele_vision=tele_vision, inter_net=inter_net ) if __name__ == '__main__': unittest.main()
samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/test/test_child.py
1,575
Child unit test stubs Test Child This will fail because additional_properties_type is None in ChildAllOf and it must be defined as any type to allow in the property radio_waves which is not defined in ChildAllOf, it is defined in Grandparent OpenAPI Petstore This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: " \ # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech coding: utf-8 make an instance of Child, a composed schema model
584
en
0.877746
# Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import TYPE_CHECKING, Dict, Optional import torch from monai.utils import exact_version, optional_import Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events") Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint") if TYPE_CHECKING: from ignite.engine import Engine else: Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine") class CheckpointLoader: """ CheckpointLoader acts as an Ignite handler to load checkpoint data from file. It can load variables for network, optimizer, lr_scheduler, etc. If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead as PyTorch recommended and then use this loader to load the model. Args: load_path: the file path of checkpoint, it should be a PyTorch `pth` file. load_dict: target objects that load checkpoint to. examples:: {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler} name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``. map_location: when loading the module for distributed training/evaluation, need to provide an appropriate map_location argument to prevent a process to step into others’ devices. 
If map_location is missing, torch.load will first load the module to CPU and then copy each parameter to where it was saved, which would result in all processes on the same machine using the same set of devices. """ def __init__( self, load_path: str, load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None, ) -> None: assert load_path is not None, "must provide clear path to load checkpoint." self.load_path = load_path assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load." self.logger = logging.getLogger(name) for k, v in load_dict.items(): if hasattr(v, "module"): load_dict[k] = v.module self.load_dict = load_dict self._name = name self.map_location = map_location def attach(self, engine: Engine) -> None: """ Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. """ if self._name is None: self.logger = engine.logger engine.add_event_handler(Events.STARTED, self) def __call__(self, engine: Engine) -> None: """ Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. """ checkpoint = torch.load(self.load_path, map_location=self.map_location) if len(self.load_dict) == 1: key = list(self.load_dict.keys())[0] if not (key in checkpoint): checkpoint = {key: checkpoint} Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint) self.logger.info(f"Restored all variables from {self.load_path}")
monai/handlers/checkpoint_loader.py
3,676
CheckpointLoader acts as an Ignite handler to load checkpoint data from file. It can load variables for network, optimizer, lr_scheduler, etc. If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead as PyTorch recommended and then use this loader to load the model. Args: load_path: the file path of checkpoint, it should be a PyTorch `pth` file. load_dict: target objects that load checkpoint to. examples:: {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler} name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``. map_location: when loading the module for distributed training/evaluation, need to provide an appropriate map_location argument to prevent a process to step into others’ devices. If map_location is missing, torch.load will first load the module to CPU and then copy each parameter to where it was saved, which would result in all processes on the same machine using the same set of devices. Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. Copyright 2020 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
1,766
en
0.801732
#pip install flask-bcrypt from flask_bcrypt import Bcrypt # Create the Hasher bcrypt = Bcrypt() hashed_pass = bcrypt.generate_password_hash('somethingSuperSecret') print(hashed_pass) wrong_check = bcrypt.check_password_hash(hashed_pass, 'wrongpass') print(wrong_check) right_check = bcrypt.check_password_hash(hashed_pass, 'somethingSuperSecret') print(right_check)
07-User-Authentication/00-Password-Hashing/Using-Bcrypt.py
370
pip install flask-bcrypt Create the Hasher
42
en
0.333513
#!/usr/bin/env python3 # =============================================================================== # NAME: XmlSerializeParser.py # # DESCRIPTION: This class parses the XML serializable types files. # # USAGE: # # AUTHOR: reder # EMAIL: reder@jpl.nasa.gov # DATE CREATED : June 4, 2013 # # Copyright 2013, California Institute of Technology. # ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged. # =============================================================================== # # Python standard modules # import hashlib import logging import os import sys from lxml import etree from fprime_ac.utils import ConfigManager from fprime_ac.utils.buildroot import ( BuildRootCollisionException, BuildRootMissingException, locate_build_root, ) from fprime_ac.utils.exceptions import FprimeXmlException # # Python extention modules and custom interfaces # # # Universal globals used within module go here. # (DO NOT USE MANY!) # # Global logger init. below. PRINT = logging.getLogger("output") DEBUG = logging.getLogger("debug") format_dictionary = { "U8": "%u", "I8": "%d", "U16": "%u", "I16": "%d", "U32": "%u", "I32": "%d", "U64": "%lu", "I64": "%ld", "F32": "%g", "F64": "%g", "bool": "%s", "string": "%s", "ENUM": "%d", } # class XmlSerializeParser: """ An XML parser class that uses lxml.etree to consume an XML serializable type documents. The class is instanced with an XML file name. """ def __init__(self, xml_file=None): """ Given a well formed XML file (xml_file), read it and turn it into a big string. 
""" self.__root = None self.__name = "" self.__namespace = None # List of C++ include files for serializable *.hpp file self.__include_header_files = [] # List of XML serializable description dependencies self.__includes = [] # List of XML enum type files self.__include_enum_files = [] # List of XML array type files self.__include_array_files = [] # Comment block of text for serializable self.__comment = "" # List of (name, type, comment) tuples self.__members = [] # Type ID for serialized type self.__type_id = None # if os.path.isfile(xml_file) == False: stri = "ERROR: Could not find specified XML file %s." % xml_file raise OSError(stri) fd = open(xml_file) xml_file = os.path.basename(xml_file) # xml_file = os.path.basename(xml_file) self.__xml_filename = xml_file self.__config = ConfigManager.ConfigManager.getInstance() # xml_parser = etree.XMLParser(remove_comments=True) element_tree = etree.parse(fd, parser=xml_parser) # Validate new imports using their root tag as a key to find what schema to use rng_file = self.__config.get( "schema", element_tree.getroot().tag.lower() ).lstrip("/") try: rng_file = locate_build_root(rng_file) except (BuildRootMissingException, BuildRootCollisionException) as bre: stri = "ERROR: Could not find specified RNG file {}. 
{}".format( rng_file, str(bre), ) raise OSError(stri) file_handler = open(rng_file) relax_parsed = etree.parse(file_handler) file_handler.close() relax_compiled = etree.RelaxNG(relax_parsed) # 2/3 conversion if not relax_compiled.validate(element_tree): msg = "XML file {} is not valid according to schema {}.".format( xml_file, rng_file ) raise FprimeXmlException(msg) serializable = element_tree.getroot() if serializable.tag != "serializable": PRINT.info("%s is not a serializable definition file" % xml_file) sys.exit(-1) print("Parsing Serializable %s" % serializable.attrib["name"]) self.__name = serializable.attrib["name"] if "namespace" in serializable.attrib: self.__namespace = serializable.attrib["namespace"] else: self.__namespace = None if "typeid" in serializable.attrib: self.__type_id = serializable.attrib["typeid"] else: self.__type_id = None for serializable_tag in serializable: if serializable_tag.tag == "comment": self.__comment = serializable_tag.text.strip() elif serializable_tag.tag == "include_header": self.__include_header_files.append(serializable_tag.text) elif serializable_tag.tag == "import_serializable_type": self.__includes.append(serializable_tag.text) elif serializable_tag.tag == "import_enum_type": self.__include_enum_files.append(serializable_tag.text) elif serializable_tag.tag == "import_array_type": self.__include_array_files.append(serializable_tag.text) elif serializable_tag.tag == "members": for member in serializable_tag: if member.tag != "member": PRINT.info( "%s: Invalid tag %s in serializable member definition" % (xml_file, member.tag) ) sys.exit(-1) n = member.attrib["name"] t = member.attrib["type"] if "size" in list(member.attrib.keys()): if t == "ENUM": PRINT.info( "%s: Member %s: arrays of enums not supported yet!" 
% (xml_file, n) ) sys.exit(-1) s = member.attrib["size"] if not s.isdigit(): PRINT.info( "{}: Member {}: size must be a number".format( xml_file, n ) ) sys.exit(-1) else: s = None if "format" in list(member.attrib.keys()): f = member.attrib["format"] else: if t in list(format_dictionary.keys()): f = format_dictionary[t] else: # Must be included type, which will use toString method f = "%s" if t == "string": if s is None: PRINT.info( "%s: member %s string must specify size tag" % (xml_file, member.tag) ) sys.exit(-1) if "comment" in list(member.attrib.keys()): c = member.attrib["comment"] else: c = None for member_tag in member: if member_tag.tag == "enum" and t == "ENUM": en = member_tag.attrib["name"] enum_members = [] for mem in member_tag: mn = mem.attrib["name"] if "value" in list(mem.attrib.keys()): v = mem.attrib["value"] else: v = None if "comment" in list(mem.attrib.keys()): mc = mem.attrib["comment"].strip() else: mc = None enum_members.append((mn, v, mc)) t = ((t, en), enum_members) else: PRINT.info( "%s: Invalid member tag %s in serializable member %s" % (xml_file, member_tag.tag, n) ) sys.exit(-1) self.__members.append((n, t, s, f, c)) # # Generate a type id here using SHA256 algorithm and XML stringified file. # if not "typeid" in serializable.attrib: s = etree.tostring(element_tree.getroot()) h = hashlib.sha256(s) n = h.hexdigest() self.__type_id = "0x" + n.upper()[-8:] def get_typeid(self): """ Return a generated type ID from contents of XML file. """ return self.__type_id def get_xml_filename(self): """ Return the original XML filename parsed. """ return self.__xml_filename def get_name(self): return self.__name def get_namespace(self): return self.__namespace def get_include_header_files(self): """ Return a list of all imported Port type XML files. """ return self.__include_header_files def get_includes(self): """ Returns a list of all imported XML serializable files. 
""" return self.__includes def get_include_enums(self): """ Returns a list of all imported XML enum files. """ return self.__include_enum_files def get_include_arrays(self): """ Returns a list of all imported XML array files. """ return self.__include_array_files def get_comment(self): """ Return text block string of comment for serializable class. """ return self.__comment def get_members(self): """ Returns a list of member (name, type, optional size, optional format, optional comment) needed. """ return self.__members
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
10,160
An XML parser class that uses lxml.etree to consume an XML serializable type documents. The class is instanced with an XML file name. Given a well formed XML file (xml_file), read it and turn it into a big string. Return text block string of comment for serializable class. Returns a list of all imported XML array files. Returns a list of all imported XML enum files. Return a list of all imported Port type XML files. Returns a list of all imported XML serializable files. Returns a list of member (name, type, optional size, optional format, optional comment) needed. Return a generated type ID from contents of XML file. Return the original XML filename parsed. !/usr/bin/env python3 =============================================================================== NAME: XmlSerializeParser.py DESCRIPTION: This class parses the XML serializable types files. USAGE: AUTHOR: reder EMAIL: reder@jpl.nasa.gov DATE CREATED : June 4, 2013 Copyright 2013, California Institute of Technology. ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged. =============================================================================== Python standard modules Python extention modules and custom interfaces Universal globals used within module go here. (DO NOT USE MANY!) Global logger init. below. List of C++ include files for serializable *.hpp file List of XML serializable description dependencies List of XML enum type files List of XML array type files Comment block of text for serializable List of (name, type, comment) tuples Type ID for serialized type xml_file = os.path.basename(xml_file) Validate new imports using their root tag as a key to find what schema to use 2/3 conversion Must be included type, which will use toString method Generate a type id here using SHA256 algorithm and XML stringified file.
1,828
en
0.615762
from .core import Pool, CatboostError, get_catboost_bin_module, ARRAY_TYPES from collections import defaultdict import numpy as np _catboost = get_catboost_bin_module() _eval_metric_util = _catboost._eval_metric_util _get_roc_curve = _catboost._get_roc_curve _select_threshold = _catboost._select_threshold def create_cd( label=None, cat_features=None, weight=None, baseline=None, doc_id=None, group_id=None, subgroup_id=None, timestamp=None, auxiliary_columns=None, feature_names=None, output_path='train.cd' ): _from_param_to_cd = { 'label': 'Label', 'weight': 'Weight', 'baseline': 'Baseline', 'doc_id': 'DocId', 'group_id': 'GroupId', 'subgroup_id': 'SubgroupId', 'timestamp': 'Timestamp' } _column_description = defaultdict(lambda: ['Num', '']) for key, value in locals().copy().items(): if not (key.startswith('_') or value is None): if key in ('cat_features', 'auxiliary_columns'): if isinstance(value, int): value = [value] for index in value: if not isinstance(index, int): raise CatboostError('Unsupported index type. Expected int, got {}'.format(type(index))) if index in _column_description: raise CatboostError('The index {} occurs more than once'.format(index)) _column_description[index] = ['Categ', ''] if key == 'cat_features' else ['Auxiliary', ''] elif key not in ('feature_names', 'output_path'): if not isinstance(value, int): raise CatboostError('Unsupported index type. 
Expected int, got {}'.format(type(value))) if value in _column_description: raise CatboostError('The index {} occurs more than once'.format(value)) _column_description[value] = [_from_param_to_cd[key], ''] if feature_names is not None: for feature_index, name in feature_names.items(): real_feature_index = feature_index for column_index, (title, _) in sorted(_column_description.items()): if column_index > real_feature_index: break if title not in ('Num', 'Categ'): real_feature_index += 1 _column_description[real_feature_index][1] = name with open(output_path, 'w') as f: for index, (title, name) in sorted(_column_description.items()): f.write('{}\t{}\t{}\n'.format(index, title, name)) def eval_metric(label, approx, metric, weight=None, group_id=None, thread_count=-1): """ Evaluate metrics with raw approxes and labels. Parameters ---------- label : list or numpy.arrays or pandas.DataFrame or pandas.Series Object labels. approx : list or numpy.arrays or pandas.DataFrame or pandas.Series Object approxes. metrics : list of strings List of eval metrics. weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None) Object weights. group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None) Object group ids. thread_count : int, optional (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- metric results : list with metric values. """ if len(approx) == 0: approx = [[]] if not isinstance(approx[0], ARRAY_TYPES): approx = [approx] return _eval_metric_util(label, approx, metric, weight, group_id, thread_count) def get_gpu_device_count(): return get_catboost_bin_module()._get_gpu_device_count() def reset_trace_backend(filename): get_catboost_bin_module()._reset_trace_backend(filename) def get_roc_curve(model, data, thread_count=-1): """ Build points of ROC curve. Parameters ---------- model : catboost.CatBoost The trained model. 
data : catboost.Pool or list of catboost.Pool A set of samples to build ROC curve with. thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- curve points : tuple of three arrays (fpr, tpr, thresholds) """ if type(data) == Pool: data = [data] if not isinstance(data, list): raise CatboostError('data must be a catboost.Pool or list of pools.') for pool in data: if not isinstance(pool, Pool): raise CatboostError('one of data pools is not catboost.Pool') return _get_roc_curve(model._object, data, thread_count) def get_fpr_curve(model=None, data=None, curve=None, thread_count=-1): """ Build points of FPR curve. Parameters ---------- model : catboost.CatBoost The trained model. data : catboost.Pool or list of catboost.Pool A set of samples to build ROC curve with. curve : tuple of three arrays (fpr, tpr, thresholds) ROC curve points in format of get_roc_curve returned value. If set, data parameter must not be set. thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- curve points : tuple of two arrays (thresholds, fpr) """ if curve is not None: if data is not None: raise CatboostError('Only one of the parameters data and curve should be set.') if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3: raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).') fpr, thresholds = curve[0][:], curve[2][:] else: if model is None or data is None: raise CatboostError('model and data parameters should be set when curve parameter is None.') fpr, _, thresholds = get_roc_curve(model, data, thread_count) return thresholds, fpr def get_fnr_curve(model=None, data=None, curve=None, thread_count=-1): """ Build points of FNR curve. Parameters ---------- model : catboost.CatBoost The trained model. 
data : catboost.Pool or list of catboost.Pool A set of samples to build ROC curve with. curve : tuple of three arrays (fpr, tpr, thresholds) ROC curve points in format of get_roc_curve returned value. If set, data parameter must not be set. thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- curve points : tuple of two arrays (thresholds, fnr) """ if curve is not None: if data is not None: raise CatboostError('Only one of the parameters data and curve should be set.') if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3: raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).') tpr, thresholds = curve[1], curve[2][:] else: if model is None or data is None: raise CatboostError('model and data parameters should be set when curve parameter is None.') _, tpr, thresholds = get_roc_curve(model, data, thread_count) fnr = np.array([1 - x for x in tpr]) return thresholds, fnr def select_threshold(model=None, data=None, curve=None, FPR=None, FNR=None, thread_count=-1): """ Selects a threshold for prediction. Parameters ---------- model : catboost.CatBoost The trained model. data : catboost.Pool or list of catboost.Pool Set of samples to build ROC curve with. If set, curve parameter must not be set. curve : tuple of three arrays (fpr, tpr, thresholds) ROC curve points in format of get_roc_curve returned value. If set, data parameter must not be set. FPR : desired false-positive rate FNR : desired false-negative rate (only one of FPR and FNR should be chosen) thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. 
Returns ------- threshold : double """ if data is not None: if curve is not None: raise CatboostError('Only one of the parameters data and curve should be set.') if model is None: raise CatboostError('model and data parameters should be set when curve parameter is None.') if type(data) == Pool: data = [data] if not isinstance(data, list): raise CatboostError('data must be a catboost.Pool or list of pools.') for pool in data: if not isinstance(pool, Pool): raise CatboostError('one of data pools is not catboost.Pool') elif curve is not None: if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3: raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).') else: raise CatboostError('One of the parameters data and curve should be set.') return _select_threshold(model._object, data, curve, FPR, FNR, thread_count)
catboost/python-package/catboost/utils.py
9,452
Evaluate metrics with raw approxes and labels. Parameters ---------- label : list or numpy.arrays or pandas.DataFrame or pandas.Series Object labels. approx : list or numpy.arrays or pandas.DataFrame or pandas.Series Object approxes. metrics : list of strings List of eval metrics. weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None) Object weights. group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None) Object group ids. thread_count : int, optional (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- metric results : list with metric values. Build points of FNR curve. Parameters ---------- model : catboost.CatBoost The trained model. data : catboost.Pool or list of catboost.Pool A set of samples to build ROC curve with. curve : tuple of three arrays (fpr, tpr, thresholds) ROC curve points in format of get_roc_curve returned value. If set, data parameter must not be set. thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- curve points : tuple of two arrays (thresholds, fnr) Build points of FPR curve. Parameters ---------- model : catboost.CatBoost The trained model. data : catboost.Pool or list of catboost.Pool A set of samples to build ROC curve with. curve : tuple of three arrays (fpr, tpr, thresholds) ROC curve points in format of get_roc_curve returned value. If set, data parameter must not be set. thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- curve points : tuple of two arrays (thresholds, fpr) Build points of ROC curve. Parameters ---------- model : catboost.CatBoost The trained model. data : catboost.Pool or list of catboost.Pool A set of samples to build ROC curve with. 
thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- curve points : tuple of three arrays (fpr, tpr, thresholds) Selects a threshold for prediction. Parameters ---------- model : catboost.CatBoost The trained model. data : catboost.Pool or list of catboost.Pool Set of samples to build ROC curve with. If set, curve parameter must not be set. curve : tuple of three arrays (fpr, tpr, thresholds) ROC curve points in format of get_roc_curve returned value. If set, data parameter must not be set. FPR : desired false-positive rate FNR : desired false-negative rate (only one of FPR and FNR should be chosen) thread_count : int (default=-1) Number of threads to work with. If -1, then the number of threads is set to the number of cores. Returns ------- threshold : double
2,955
en
0.473411
"""Top-level package for Tinned Python.""" __author__ = """Tom Finill""" __email__ = 'tomfinill@gmail.com' __version__ = '0.1.0'
tinned_python/__init__.py
130
Top-level package for Tinned Python.
36
en
0.778082
""" Example code that implements a simple Neural Net predictor for z_mode, and Gaussian centered at z_mode with base_width read in fromfile and pdf width set to base_width*(1+zmode). """ import numpy as np # from numpy import inf import sklearn.neural_network as sknn from sklearn.preprocessing import StandardScaler from scipy.stats import norm from rail.estimation.estimator import Estimator as BaseEstimation def make_color_data(data_dict): """ make a dataset consisting of the i-band mag and the five colors Returns: -------- input_data: (nd-array) array of imag and 5 colors """ input_data = data_dict['mag_i_lsst'] bands = ['u', 'g', 'r', 'i', 'z', 'y'] # make colors and append to input data for i in range(5): # replace the non-detect 99s with 28.0 just arbitrarily for now band1 = data_dict[f'mag_{bands[i]}_lsst'] # band1err = data_dict[f'mag_err_{bands[i]}_lsst'] band2 = data_dict[f'mag_{bands[i+1]}_lsst'] # band2err = data_dict[f'mag_err_{bands[i+1]}_lsst'] # for j,xx in enumerate(band1): # if np.isclose(xx,99.,atol=.01): # band1[j] = band1err[j] # band1err[j] = 1.0 # for j,xx in enumerate(band2): # if np.isclose(xx,99.,atol=0.01): # band2[j] = band2err[j] # band2err[j] = 1.0 input_data = np.vstack((input_data, band1-band2)) return input_data.T def regularize_data(data): scaler = StandardScaler() scaler.fit(data) regularized_data = scaler.transform(data) return regularized_data class simpleNN(BaseEstimation): """ Subclass to implement a simple point estimate Neural Net photoz rather than actually predict PDF, for now just predict point zb and then put an error of width*(1+zb). We'll do a "real" NN photo-z later. 
""" def __init__(self, base_config, config_dict): """ Parameters: ----------- run_dict: dict dictionary of all variables read in from the run_params values in the yaml file """ super().__init__(base_config=base_config, config_dict=config_dict) inputs = self.config_dict['run_params'] self.width = inputs['width'] self.zmin = inputs['zmin'] self.zmax = inputs['zmax'] self.nzbins = inputs['nzbins'] np.random.seed(71) def inform(self): """ train the NN model """ speczs = self.training_data['redshift'] print("stacking some data...") color_data = make_color_data(self.training_data) input_data = regularize_data(color_data) simplenn = sknn.MLPRegressor(hidden_layer_sizes=(12, 12), activation='tanh', solver='lbfgs') simplenn.fit(input_data, speczs) self.model = simplenn def estimate(self, test_data): color_data = make_color_data(test_data) input_data = regularize_data(color_data) zmode = np.round(self.model.predict(input_data), 3) pdfs = [] widths = self.width * (1.0+zmode) self.zgrid = np.linspace(self.zmin, self.zmax, self.nzbins) for i, zb in enumerate(zmode): pdfs.append(norm.pdf(self.zgrid, zb, widths[i])) pz_dict = {'zmode': zmode, 'pz_pdf': pdfs} return pz_dict
rail/estimation/algos/sklearn_nn.py
3,394
Subclass to implement a simple point estimate Neural Net photoz rather than actually predict PDF, for now just predict point zb and then put an error of width*(1+zb). We'll do a "real" NN photo-z later. Parameters: ----------- run_dict: dict dictionary of all variables read in from the run_params values in the yaml file train the NN model make a dataset consisting of the i-band mag and the five colors Returns: -------- input_data: (nd-array) array of imag and 5 colors Example code that implements a simple Neural Net predictor for z_mode, and Gaussian centered at z_mode with base_width read in fromfile and pdf width set to base_width*(1+zmode). from numpy import inf make colors and append to input data replace the non-detect 99s with 28.0 just arbitrarily for now band1err = data_dict[f'mag_err_{bands[i]}_lsst'] band2err = data_dict[f'mag_err_{bands[i+1]}_lsst'] for j,xx in enumerate(band1): if np.isclose(xx,99.,atol=.01): band1[j] = band1err[j] band1err[j] = 1.0 for j,xx in enumerate(band2): if np.isclose(xx,99.,atol=0.01): band2[j] = band2err[j] band2err[j] = 1.0
1,120
en
0.669015
# problem 37 # Project Euler __author__ = 'Libao Jin' __date__ = 'July 17, 2015' def rotateDigits(number): string = str(number) rotatedString = string[::-1] rotatedNumber = int(rotatedString) return rotatedNumber def isSymmetrical(number): rotatedNumber = rotateDigits(number) if rotatedNumber == number: return True else: return False def toBinary(number): string = bin(number)[2:] bNumber = int(string) return bNumber def isPalindromic(number): if isSymmetrical(number): binaryNumber = toBinary(number) if isSymmetrical(binaryNumber): return True return False def doubleBasePalindromes(UPPER_BOUND): number = 1 DBP = [] while number < UPPER_BOUND: if isPalindromic(number): DBP.append(number) number += 1 pureDBP = DBP.copy() sumDBP = sum(DBP) for i,e in enumerate(DBP): DBP[i] = (e, toBinary(e)) return (sumDBP, pureDBP, DBP) def solution(): UPPER_BOUND = 1000000 DBP_Info = doubleBasePalindromes(UPPER_BOUND) print(DBP_Info) solution()
Python/Project.Euler/Answers.Python/36.py
991
problem 37 Project Euler
24
en
0.552765
""" Django settings for testapp project. Generated by 'django-admin startproject' using Django 1.10.6. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os import sys # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SITE_PATH = os.path.abspath(os.path.dirname(__file__)) MAP_WIDGETS_PATH = os.path.normpath(os.path.join(SITE_PATH, '..', '..', '..')) if MAP_WIDGETS_PATH not in sys.path: sys.path.insert(0, MAP_WIDGETS_PATH) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'o6b2c!r921-+^h7jlm&4x#sn53qwfif+@8(!4b*csitx+69b=5' # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.gis', 'mapwidgets', 'widgets' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'testapp.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'testapp.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'NAME': 'mapwidget_db', 'USER': 'mapwidgetdbu', 'PASSWORD': 'mapwidgetdbu', 'HOST': 'postgres', 'PORT': '5432', } } if 'TRAVIS' in os.environ: DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'NAME': 'travisci', 'USER': 'postgres', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 
'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'assets/') STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media/') MEDIA_URL = '/uploads/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] SITE_DOMAIN = 'django' TESTING = sys.argv[1:2] == ['test'] GOOGLE_MAP_API_KEY = os.environ.get('GOOGLE_MAP_API_KEY') try: from tests.testapp.testapp.settings_local import * except: pass
tests/testapp/testapp/settings.py
4,187
Django settings for testapp project. Generated by 'django-admin startproject' using Django 1.10.6. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.10/ref/settings/databases Password validation https://docs.djangoproject.com/en/1.10/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/1.10/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.10/howto/static-files/
996
en
0.632209
########################################################################## # # Copyright (c) 2017, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

import random
import unittest
import imath
import IECore
import IECoreScene


class MeshAlgoWindingTest( unittest.TestCase ) :
	"""Tests for IECoreScene.MeshAlgo.reverseWinding.

	Reversing the winding order of a mesh flips its face normals; vertex
	data must be untouched while per-face-vertex (FaceVarying) data and
	vertex ids are reversed within each face.
	"""

	def makeSingleTriangleMesh( self ):
		# Helper: one triangle with FaceVarying "uv"/"foo" variables and a
		# Vertex-interpolated "Pref" variable, so both interpolation classes
		# are represented in the tests below.
		verticesPerFace = IECore.IntVectorData( [ 3 ] )
		vertexIds = IECore.IntVectorData( [ 0, 1, 2 ] )
		p = IECore.V3fVectorData( [ imath.V3f( 0, 0, 0 ), imath.V3f( 1, 0, 0 ), imath.V3f( 0, 1, 0 ) ] )
		uv = IECore.V2fVectorData( [ imath.V2f( 0, 0 ), imath.V2f( 1, 0 ), imath.V2f( 0, 1 ) ] )

		mesh = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
		mesh["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, uv )
		mesh["foo"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.V2fVectorData( [ imath.V2f( 0, 0 ), imath.V2f( 0, 1 ), imath.V2f( 1, 0 ) ] ) )

		prefData = IECore.V3fVectorData( [ imath.V3f( 0, 0, 0 ), imath.V3f( 0, -1, 0 ), imath.V3f( 1, 0, 0 ) ] )
		mesh["Pref"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, prefData )

		return mesh

	def testSingleTriangle( self ) :
		"""Reversal keeps vertex data/blind data, reverses ids and FaceVarying data."""
		mesh = self.makeSingleTriangleMesh()
		mesh.blindData()["test"] = IECore.IntData( 10 )
		meshReversed = mesh.copy()
		IECoreScene.MeshAlgo.reverseWinding( meshReversed )

		# Meshes should be identical
		self.assertEqual( meshReversed.interpolation, mesh.interpolation )
		for interpolation in IECoreScene.PrimitiveVariable.Interpolation.values.values() :
			self.assertEqual( meshReversed.variableSize( interpolation ), mesh.variableSize( interpolation ) )
		self.assertEqual( mesh.keys(), meshReversed.keys() )
		self.assertEqual( mesh["P"], meshReversed["P"] )
		self.assertEqual( mesh["Pref"], meshReversed["Pref"] )
		self.assertEqual( mesh.blindData(), meshReversed.blindData() )

		# Except for vertex ids, and facevarying data
		self.assertEqual( list( meshReversed.vertexIds ), list( reversed( mesh.vertexIds ) ) )
		self.assertEqual( list( meshReversed["uv"].data ), list( reversed( mesh["uv"].data ) ) )

	def testPlane( self ) :
		"""On a triangulated plane, reversal negates normals but preserves UVs."""
		mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
		IECoreScene.TriangulateOp()( input = mesh, copyInput = False )
		meshReversed = mesh.copy()
		IECoreScene.MeshAlgo.reverseWinding( meshReversed )

		evaluator = IECoreScene.MeshPrimitiveEvaluator( mesh )
		evaluatorReversed = IECoreScene.MeshPrimitiveEvaluator( meshReversed )
		result = evaluator.createResult()
		resultReversed = evaluatorReversed.createResult()

		# Sample random points on the plane and compare both evaluators.
		for i in range( 0, 1000 ) :
			p = imath.V3f( random.uniform( -1.0, 1.0 ), random.uniform( -1.0, 1.0 ), 0 )
			evaluator.closestPoint( p, result )
			evaluatorReversed.closestPoint( p, resultReversed )
			self.assertEqual( resultReversed.normal(), -result.normal() )
			reversedUV = resultReversed.vec2PrimVar( meshReversed["uv"] )
			uv = result.vec2PrimVar( mesh["uv"] )
			self.assertAlmostEqual( reversedUV[0], uv[0], delta = 0.0001 )
			self.assertAlmostEqual( reversedUV[1], uv[1], delta = 0.0001 )

	def testRoundTrip( self ) :
		"""Reversing the winding twice must restore the original mesh exactly."""
		mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
		meshReversed = mesh.copy()
		IECoreScene.MeshAlgo.reverseWinding( meshReversed )
		meshReversedAgain = meshReversed.copy()
		IECoreScene.MeshAlgo.reverseWinding( meshReversedAgain )
		self.assertEqual( mesh, meshReversedAgain )

	def testUVIndices( self ) :
		"""Indexed UVs: only the index vector is reversed, not the data itself."""
		verticesPerFace = IECore.IntVectorData( [ 3 ] )
		vertexIds = IECore.IntVectorData( [ 0, 1, 2 ] )
		p = IECore.V3fVectorData( [ imath.V3f( 0, 0, 0 ), imath.V3f( 1, 0, 0 ), imath.V3f( 0, 1, 0 ) ] )
		uv = IECore.V2fVectorData( [ imath.V2f( 0, 0 ), imath.V2f( 1, 0 ), imath.V2f( 0, 1 ) ] )
		uvIndices = IECore.IntVectorData( [ 0, 1, 2 ] )

		mesh = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
		mesh["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, uv, uvIndices )

		meshReversed = mesh.copy()
		IECoreScene.MeshAlgo.reverseWinding( meshReversed )

		# Meshes should be identical
		self.assertEqual( meshReversed.interpolation, mesh.interpolation )
		for interpolation in IECoreScene.PrimitiveVariable.Interpolation.values.values() :
			self.assertEqual( meshReversed.variableSize( interpolation ), mesh.variableSize( interpolation ) )
		self.assertEqual( mesh.keys(), meshReversed.keys() )
		self.assertEqual( mesh["P"], meshReversed["P"] )

		# UV indices should change, but UV data doesn't need to
		self.assertEqual( meshReversed["uv"].data, mesh["uv"].data )
		self.assertEqual( list( meshReversed["uv"].indices ), list( reversed( mesh["uv"].indices ) ) )


if __name__ == "__main__":
	unittest.main()
test/IECoreScene/MeshAlgoWindingTest.py
6,517
Copyright (c) 2017, Image Engine Design Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Image Engine Design nor the names of any other contributors to this software may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Meshes should be identical Except for vertex ids, and facevarying data Meshes should be identical UV indices should change, but UV data doesn't need to
1,721
en
0.895843
# Snowflake dialect for siuba's SQL translation layer.  Registers
# scalar / window / aggregate translations on top of the generic base
# dialect.
from sqlalchemy.sql import func as fn
from sqlalchemy import sql

from ..translate import (
    SqlTranslator,
    extend_base,
    sql_scalar, sql_agg,
    win_agg, win_cumul,
    annotate
)

#from .postgresql import PostgresqlColumn, PostgresqlColumnAgg
from .base import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt


# Data ----

class SnowflakeColumn(SqlColumn):
    # Marker codata class: dispatch target for Snowflake column translations.
    pass


class SnowflakeColumnAgg(SqlColumnAgg, SnowflakeColumn):
    # Marker codata class for aggregate (grouped) translations.
    pass


# Translations ================================================================

@_dt.sql_func_last_day_in_period.register
def sql_func_last_day_in_period(codata: SnowflakeColumn, col, period):
    # Last day of the period containing `col`: truncate to the period start,
    # step forward one period, then back one day.
    return _dt.date_trunc(codata, col, period) \
        + sql.text("interval '1 %s'" % period) \
        - sql.text("interval '1 day'")


# Scalar ----

extend_base(
    SnowflakeColumn,
    __floordiv__ = lambda _, x, y: fn.floor(x / y),
    __rfloordiv__ = lambda _, x, y: fn.floor(y / x),
    # connector has a bug with %
    # see: https://github.com/snowflakedb/snowflake-sqlalchemy/issues/246
    __mod__ = lambda _, x, y: fn.mod(x, y),
    __rmod__ = lambda _, x, y: fn.mod(y, x),
    mod = lambda _, x, y: fn.mod(x,y),
    rmod = lambda _, x, y: fn.mod(y,x),
    # TODO: str.contains
)


# Window ----

extend_base(
    SnowflakeColumn,
    all = win_agg("booland_agg"),
    any = win_agg("boolor_agg"),
    count = win_agg("count"),
    cumsum = annotate(win_cumul("sum"), result_type="variable"),
    # note that the number of decimal places Snowflake returns, and whether
    # the result is numeric depends on the input. mark as variable, so tests
    # do not check dtype
    # see https://community.snowflake.com/s/question/0D50Z000079hpxvSAA/numeric-calculations-truncated-to-3-decimal-places
    mean = annotate(win_agg("avg"), result_type="variable"),
    std = win_agg("stddev_samp"),
    sum = annotate(win_agg("sum"), result_type="variable"),
    var = win_agg("var_samp"),
    # str.contains
    # dt methods are more like base
)


# Agg ----

extend_base(
    SnowflakeColumnAgg,
    all = sql_agg("booland_agg"),
    any = sql_agg("boolor_agg"),
    count = sql_agg("count"),
    std = sql_agg("stddev_samp"),
    var = sql_agg("var_samp"),
)

# Public translator object used by siuba to compile expressions for Snowflake.
translator = SqlTranslator.from_mappings(
    SnowflakeColumn, SnowflakeColumnAgg
)
siuba/sql/dialects/snowflake.py
2,325
from .postgresql import PostgresqlColumn, PostgresqlColumnAgg Data ---- Translations ================================================================ Scalar ---- connector has a bug with % see: https://github.com/snowflakedb/snowflake-sqlalchemy/issues/246 TODO: str.contains Window ---- note that the number of decimal places Snowflake returns, and whether the result is numeric depends on the input. mark as variable, so tests do not check dtype see https://community.snowflake.com/s/question/0D50Z000079hpxvSAA/numeric-calculations-truncated-to-3-decimal-places str.contains dt methods are more like base Agg ----
616
en
0.58145
from __future__ import unicode_literals
from __future__ import absolute_import

# `if False:` makes this a type-checking-only import (the Python-2-compatible
# equivalent of a TYPE_CHECKING guard) — it is never executed at runtime.
if False:
    from typing import Type

import six

from smoke_tests.tools.compat import Path
from smoke_tests.tools.package.base_builder import AgentImageBuilder

# Supported target distributions for agent package smoke tests.
AMAZONLINUX = "amazonlinux"
UBUNTU = "ubuntu"
ALL_DISTRIBUTION_NAMES = [AMAZONLINUX, UBUNTU]


def get_agent_distribution_builder(distribution, python_version):
    # type: (six.text_type, six.text_type) -> Type[AgentImageBuilder]
    """
    Find agent distribution docker image for smoke testing.
    :param distribution: distribution name on which agent package should be installed.
    Possible values are in the 'ALL_DISTRIBUTION_NAMES' constant.
    :param python_version: Version of the python interpreter in the distribution.
    """
    distribution = distribution.lower()

    # Dockerfile templates live next to this package; the fpm builder
    # dockerfile is spliced into each distribution's dockerfile template.
    dockerfiles_directory_path = Path(__file__).parent / "distribution_dockerfiles"

    fpm_builder_dockerfile_path = dockerfiles_directory_path / "Dockerfile.fpm_package_builder"

    fpm_package_builder_dockerfile_content = fpm_builder_dockerfile_path.read_text()

    if distribution == AMAZONLINUX:

        class AmazonLinuxSmokeImageBuilder(AgentImageBuilder):
            # Builder subclass configured for Amazon Linux + the requested
            # python version; the class is created per call so it can close
            # over `python_version`.
            PYTHON_VERSION = python_version
            COPY_AGENT_SOURCE = True
            IMAGE_TAG = "scalyr_agent_smoke_{0}_{1}".format(
                distribution, python_version
            )

            @classmethod
            def get_dockerfile_content(cls):
                # type: () -> six.text_type
                dockerfile_path = dockerfiles_directory_path / "Dockerfile.amazonlinux"
                dockerfile_content = dockerfile_path.read_text()
                return dockerfile_content.format(
                    fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content,
                    python_version=cls.PYTHON_VERSION,
                )

        return AmazonLinuxSmokeImageBuilder

    elif distribution == UBUNTU:

        class _UbuntuSmokeImageBuilder(AgentImageBuilder):
            PYTHON_VERSION = python_version
            COPY_AGENT_SOURCE = True
            IMAGE_TAG = "scalyr_agent_smoke_{0}_{1}".format(
                distribution, python_version
            )

            @classmethod
            def get_dockerfile_content(cls):
                # type: () -> six.text_type
                dockerfile_path = dockerfiles_directory_path / "Dockerfile.ubuntu"
                dockerfile_content = dockerfile_path.read_text()
                return dockerfile_content.format(
                    fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content,
                    # On Ubuntu the python2 interpreter package is named just
                    # "python"; python3 packages use the versioned name.
                    python_package_name="python"
                    if cls.PYTHON_VERSION == "python2"
                    else cls.PYTHON_VERSION,
                    python_version=cls.PYTHON_VERSION,
                )

        return _UbuntuSmokeImageBuilder

    else:
        raise IOError("Can not find such distribution: {0}".format(distribution))
smoke_tests/tools/package/__init__.py
2,952
Find agent distribution docker image for smoke testing. :param distribution: distribution name on which agent package should be installed. Possible values are in the 'ALL_DISTRIBUTION_NAMES' constant. :param python_version: Version of the python interpreter in the distribution. type: (six.text_type, six.text_type) -> Type[AgentImageBuilder] type: () -> six.text_type type: () -> six.text_type
396
en
0.705398
import csv
import sys
from pathlib import Path

# Make the repository root importable when running this example directly.
sys.path.append(str(Path(__file__).resolve().parent.parent))

from skimage.color import rgb2gray
import numpy as np
from tqdm import tqdm

from tadataka import VisualOdometry, CameraParameters
from tadataka.rigid import exp_se3, log_se3
from tadataka.projection import warp
from tadataka.mapping import MapBuilder
from tadataka.quaternion import quaternion_to_rotation
from tadataka.datasets.tum_rgbd import TUMDataset, PoseSequence
from visualization.plot import plot

# dataset format is explained at
# https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats#
# intrinsic_camera_calibration_of_the_kinect

dataset_root = Path("datasets", "rgbd_dataset_freiburg1_desk")
# dataset_root = Path("datasets", "rgbd_dataset_freiburg2_pioneer_360")
# dataset_root = Path("datasets", "rgbd_dataset_freiburg3_structure_texture_near")


def error(image_true, image_pred, mask):
    # Mean squared photometric error over the valid (masked) pixels only.
    return np.power(image_true[mask]-image_pred[mask], 2).mean()


def visualize_error_function(camera_parameters, I0, D0, I1, xi_pred):
    """Plot the photometric error around *xi_pred* along each se(3) axis.

    Debug utility: sweeps one twist coordinate at a time and plots the error
    curve so the predicted motion can be compared with the curve's minimum.
    """

    def generate_error_curve(i, start, stop, n):
        # Sweep coordinate i of the twist vector over [xi[i]+start, xi[i]+stop].
        xi = np.copy(xi_pred)
        vs = xi[i] + np.linspace(start, stop, n)
        errors = []
        for v in vs:
            xi[i] = v
            DG = exp_se3(xi)
            estimated, mask = warp(camera_parameters, I1, D0, DG)
            errors.append(error(I0, estimated, mask))
        errors = np.array(errors)
        return vs, errors

    from matplotlib import pyplot as plt

    fig = plt.figure()
    # Axis layout interleaves translation (1,3,5) and rotation (2,4,6) axes.
    for xi_index, ax_index in enumerate([1, 3, 5, 2, 4, 6]):
        ax = fig.add_subplot(3, 2, ax_index)
        vs, errors = generate_error_curve(xi_index, start=-0.10, stop=0.10, n=101)
        ax.set_title("Axis {}".format(xi_index+1))
        ax.axvline(vs[np.argmin(errors)], label="ground truth")
        ax.axvline(xi_pred[xi_index], color="red", label="prediction")
        ax.legend()
        ax.plot(vs, errors)
    plt.show()


def main():
    """Run frame-to-frame visual odometry over the dataset and save poses."""
    np.set_printoptions(suppress=True, precision=8, linewidth=1e8)

    # Kinect intrinsics — see the dataset link in the header comment.
    camera_parameters = CameraParameters(
        focal_length=[525.0, 525.0],
        offset=[319.5, 239.5]
    )

    dataset = TUMDataset(dataset_root)

    # G accumulates the camera-to-world pose; starts at identity.
    G = np.eye(4)
    frame0 = dataset.load_color(0)
    sequence_pred = PoseSequence()
    sequence_pred.add(frame0.timestamp_depth, G)
    for i in tqdm(range(1, dataset.size)):
        frame1 = dataset.load_color(i)
        # TODO not necessary to convert the color of the same image twice
        # we need to create a better interface
        vo = VisualOdometry(camera_parameters,
                            rgb2gray(frame0.image), frame0.depth_map,
                            rgb2gray(frame1.image))
        DG = vo.estimate_motion(n_coarse_to_fine=6)
        # Compose the inverse relative motion onto the running pose.
        G = np.dot(G, np.linalg.inv(DG))
        sequence_pred.add(frame1.timestamp_depth, G)
        frame0 = frame1

    sequence_pred.save("poses.txt")
    # TODO implement the following
    # pointcloud = map_builder.export()
    # export_pointcloud(pointcloud)


main()
examples/rgbd_desk.py
3,095
dataset format is explained at https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats intrinsic_camera_calibration_of_the_kinect dataset_root = Path("datasets", "rgbd_dataset_freiburg2_pioneer_360") dataset_root = Path("datasets", "rgbd_dataset_freiburg3_structure_texture_near") TODO not necessary to convert the color of the same image twice we need to create a better interface TODO implement the following pointcloud = map_builder.export() export_pointcloud(pointcloud)
483
en
0.442715
DEPS = [ 'archive', 'chromium', 'chromium_android', 'depot_tools/bot_update', 'depot_tools/gclient', 'depot_tools/tryserver', 'file', 'recipe_engine/path', 'recipe_engine/platform', 'recipe_engine/properties', 'recipe_engine/python', 'recipe_engine/step', 'trigger', ] # TODO(phajdan.jr): provide coverage (http://crbug.com/693058). DISABLE_STRICT_COVERAGE = True
scripts/slave/recipe_modules/libyuv/__init__.py
392
TODO(phajdan.jr): provide coverage (http://crbug.com/693058).
61
en
0.464153
# -*- coding: utf-8 -*- # Copyright 2017-2018 ICON Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from tbears.block_manager.block_manager import PRepManager from tbears.config.tbears_config import keystore_test1 PREP_LIST = [ { "id": "hx86aba2210918a9b116973f3c4b27c41a54d5dafe", "publicKey": "04a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26", "p2pEndPoint": "target://123.45.67.89:7100" }, { "id": "hx13aca3210918a9b116973f3c4b27c41a54d5dad1", "publicKey": "0483ae642ca89c9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281e3a27", "p2pEndPoint": "target://210.34.56.17:7100" } ] class TestTBearsPRepManager(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_get_prev_block_contributors_info(self): # There is no P-Reps manager = PRepManager(is_generator_rotation=True, gen_count_per_leader=1) info = manager.get_prev_block_contributors_info() self.assertEqual(keystore_test1.get('address'), info.get('prevBlockGenerator')) self.assertEqual(0, len(info.get('prevBlockValidators'))) # There is 2 P-Reps manager = PRepManager(is_generator_rotation=True, gen_count_per_leader=1, prep_list=PREP_LIST) info = manager.get_prev_block_contributors_info() self.assertEqual(PREP_LIST[0].get('id'), info.get('prevBlockGenerator')) self.assertEqual(len(PREP_LIST) - 1, len(info.get('prevBlockValidators'))) 
self.assertEqual(PREP_LIST[1].get('id'), info.get('prevBlockValidators')[0]) # after rotate info = manager.get_prev_block_contributors_info() self.assertEqual(PREP_LIST[1].get('id'), info.get('prevBlockGenerator')) self.assertEqual(len(PREP_LIST) - 1, len(info.get('prevBlockValidators'))) self.assertEqual(PREP_LIST[0].get('id'), info.get('prevBlockValidators')[0])
tests/test_prep_manager.py
2,582
-*- coding: utf-8 -*- Copyright 2017-2018 ICON Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. There is no P-Reps There is 2 P-Reps after rotate
625
en
0.865514
# Python function to manipulate OpenFOAM files
# Developer: Jian-Xun Wang (jwang33@nd.edu)
###############################################################################

# system import
import numpy as np
import numpy.matlib
import sys
import os
import os.path as ospt
import shutil
import subprocess
from subprocess import call
import matplotlib.pyplot as plt  # For plotting
import re
import tempfile
import pdb
from matplotlib import pyplot as plt
from io import StringIO

# local import
from PIL import Image
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neural_network import MLPRegressor
import multiprocessing
from functools import partial
import time
import scipy.sparse as sp

global unitTest
unitTest = False


def readVectorFromFile(UFile):
    """Read an OpenFOAM vector field (e.g. velocity U) from a file.

    Arg:
        UFile: path of the OpenFOAM vector file.
    Returns:
        ndarray of shape (n, 3) — or (3,) when the file holds one vector.
    Raises:
        ValueError: if no vector block is found in the file.
    """
    resMid = extractVector(UFile)
    if resMid is None:
        # Previously this crashed with an opaque AttributeError on .group().
        raise ValueError("no vector block found in '%s'" % UFile)
    # Strip the parentheses and parse in memory.  The original wrote a scratch
    # file ('Utemp') into the current working directory and left the handle
    # unclosed on error; np.loadtxt accepts any file-like object, so a
    # StringIO round-trip avoids the disk I/O entirely.
    cleaned = re.sub(r'[()]', '', resMid.group())
    return np.loadtxt(StringIO(cleaned))


def readScalarFromFile(fileName):
    """Read an OpenFOAM scalar field from a file.

    Arg:
        fileName: path of the OpenFOAM scalar field file.
    Returns:
        ndarray vector of the scalar field values.
    Raises:
        ValueError: if no scalar block is found in the file.
    """
    resMid = extractScalar(fileName)
    if resMid is None:
        raise ValueError("no scalar block found in '%s'" % fileName)
    # Same in-memory parse as readVectorFromFile (was a 'temp.txt' scratch file).
    cleaned = re.sub(r'[()]', '', resMid.group())
    return np.loadtxt(StringIO(cleaned))


################################ Regular Expression ###########################

def extractVector(vectorFile):
    """Select the "(x y z)" vector block out of an OpenFOAM file.

    Args:
        vectorFile: path of the file containing the vector field (e.g. U).
    Returns:
        re.Match covering the "(x y z)\\n(x y z)\\n..." block, or None if the
        file contains no such block.
    """
    with open(vectorFile, 'r') as fin:
        line = fin.read()
    ### select U as (X X X) pattern (Using regular expression)
    patternMid = re.compile(r"""
        (
        \(                                                  # match (
        [\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)?           # match figures
        (\ )                                                # match space
        [\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)?           # match figures
        (\ )                                                # match space
        [\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)?           # match figures
        \)                                                  # match )
        \n                                                  # match next line
        )+                                                  # search greedily
    """, re.DOTALL | re.VERBOSE)
    return patternMid.search(line)


def extractScalar(scalarFile):
    """Select the scalar-value block out of an OpenFOAM file.

    Args:
        scalarFile: path of the file containing the scalar field.
    Returns:
        re.Match covering the "(\\nvalue\\nvalue\\n...)" block (use
        .group() for the text), or None if the file contains no such block.
    """
    with open(scalarFile, 'r') as fin:
        line = fin.read()
    ### select k as () pattern (Using regular expression)
    patternMid = re.compile(r"""
        \(                                                  # match "("
        \n                                                  # match next line
        (
        [\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)?           # match figures
        \n                                                  # match next line
        )+                                                  # search greedily
        \)                                                  # match ")"
    """, re.DOTALL | re.VERBOSE)
    return patternMid.search(line)
demo0/foamFileOperation.py
3,972
subFunction of readTurbStressFromFile Using regular expression to select scalar value out Args: scalarFile: The directory path of file of scalar Returns: resMid: scalar selected; you need use resMid.group() to see the content. Function is using regular expression select Vector value out Args: UFile: The directory path of file: U Returns: resMid: the U as (Ux1,Uy1,Uz1);(Ux2,Uy2,Uz2);........ Arg: fileName: The file name of OpenFOAM scalar field Regurn: a vector of scalar field Arg: tauFile: The directory path of OpenFOAM vector file (e.g., velocity) Regurn: vector: Matrix of vector Python function to manipulate OpenFOAM files Developer: Jian-Xun Wang (jwang33@nd.edu) system import Add extra path/directory Call the command line For plotting local import write it in Tautemp Regular Expression need consider directory line is U file to read select U as (X X X)pattern (Using regular expression) need consider directory line is k file to read select k as ()pattern (Using regular expression)
1,045
en
0.778491
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test mempool limiting together/eviction with the wallet.""" from test_framework.test_framework import SchleemsTestFramework from test_framework.util import * class MempoolLimitTest(SchleemsTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]] def run_test(self): txouts = gen_return_txouts() relayfee = self.nodes[0].getnetworkinfo()['relayfee'] txids = [] utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91) #create a mempool tx that will be evicted us0 = utxos.pop() inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}] outputs = {self.nodes[0].getnewaddress() : 0.0001} tx = self.nodes[0].createrawtransaction(inputs, outputs) self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee txF = self.nodes[0].fundrawtransaction(tx) self.nodes[0].settxfee(0) # return to automatic fee selection txFS = self.nodes[0].signrawtransaction(txF['hex']) txid = self.nodes[0].sendrawtransaction(txFS['hex']) relayfee = self.nodes[0].getnetworkinfo()['relayfee'] base_fee = relayfee*100 for i in range (3): txids.append([]) txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee) # by now, the tx should be evicted, check confirmation state assert(txid not in self.nodes[0].getrawmempool()) txdata = self.nodes[0].gettransaction(txid) assert(txdata['confirmations'] == 0) #confirmation should still be 0 if __name__ == '__main__': MempoolLimitTest().main()
test/functional/mempool_limit.py
1,981
Test mempool limiting together/eviction with the wallet. !/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.create a mempool tx that will be evicted specifically fund this tx with low fee return to automatic fee selection by now, the tx should be evicted, check confirmation stateconfirmation should still be 0
466
en
0.720152
import numpy as np def evaluate(X: np.ndarray, A: float = 7.0, B: float = 0.1) -> np.ndarray: """Non-monotonic Ishigami-Homma three parameter test function: `f(x) = \sin(x_{1}) + A \sin(x_{2})^2 + Bx^{4}_{3}\sin(x_{1})` This test function is commonly used to benchmark global sensitivity methods as variance-based sensitivities of this function can be analytically determined. See listed references below. In [2], the expected first-order indices are: x1: 0.3139 x2: 0.4424 x3: 0.0 when A = 7, B = 0.1 when conducting Sobol' analysis with the Saltelli sampling method with a sample size of 1000. Parameters ---------- X : np.ndarray An `N*D` array holding values for each parameter, where `N` is the number of samples and `D` is the number of parameters (in this case, three). A : float Constant `A` parameter B : float Constant `B` parameter Returns ------- Y : np.ndarray References ---------- .. [1] Ishigami, T., Homma, T., 1990. An importance quantification technique in uncertainty analysis for computer models. Proceedings. First International Symposium on Uncertainty Modeling and Analysis. https://doi.org/10.1109/ISUMA.1990.151285 .. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J., Gatelli, D., Saisana, M., Tarantola, S., 2008. Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K. https://dx.doi.org/10.1002/9780470725184 """ Y = np.zeros(X.shape[0]) Y = np.sin(X[:, 0]) + A * np.power(np.sin(X[:, 1]), 2) + \ B * np.power(X[:, 2], 4) * np.sin(X[:, 0]) return Y
src/SALib/test_functions/Ishigami.py
1,856
Non-monotonic Ishigami-Homma three parameter test function: `f(x) = \sin(x_{1}) + A \sin(x_{2})^2 + Bx^{4}_{3}\sin(x_{1})` This test function is commonly used to benchmark global sensitivity methods as variance-based sensitivities of this function can be analytically determined. See listed references below. In [2], the expected first-order indices are: x1: 0.3139 x2: 0.4424 x3: 0.0 when A = 7, B = 0.1 when conducting Sobol' analysis with the Saltelli sampling method with a sample size of 1000. Parameters ---------- X : np.ndarray An `N*D` array holding values for each parameter, where `N` is the number of samples and `D` is the number of parameters (in this case, three). A : float Constant `A` parameter B : float Constant `B` parameter Returns ------- Y : np.ndarray References ---------- .. [1] Ishigami, T., Homma, T., 1990. An importance quantification technique in uncertainty analysis for computer models. Proceedings. First International Symposium on Uncertainty Modeling and Analysis. https://doi.org/10.1109/ISUMA.1990.151285 .. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J., Gatelli, D., Saisana, M., Tarantola, S., 2008. Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K. https://dx.doi.org/10.1002/9780470725184
1,381
en
0.501432
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import numpy as np import pytest import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.ops import operations as P class ResizeNearestNeighborAlignCornerT(nn.Cell): def __init__(self, size): super(ResizeNearestNeighborAlignCornerT, self).__init__() self.ResizeNearestNeighborAlignCornerT = P.ResizeNearestNeighbor(size, align_corners=True) def construct(self, x): return self.ResizeNearestNeighborAlignCornerT(x) class ResizeNearestNeighborAlignCornerF(nn.Cell): def __init__(self, size): super(ResizeNearestNeighborAlignCornerF, self).__init__() self.ResizeNearestNeighborAlignCornerF = P.ResizeNearestNeighbor(size, align_corners=False) def construct(self, x): return self.ResizeNearestNeighborAlignCornerF(x) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_ResizeNearestNeighborAlignCornerT(): context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float32)) expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float32) rnn = ResizeNearestNeighborAlignCornerT((4, 4)) output = rnn(input_tensor) assert np.all(output.asnumpy() == expect) input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float16)) expect = 
np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float16) rnn = ResizeNearestNeighborAlignCornerT((4, 4)) output = rnn(input_tensor) assert np.all(output.asnumpy() == expect) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_onecard def test_ResizeNearestNeighborAlignCornerF(): context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float32)) expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float32) rnn = ResizeNearestNeighborAlignCornerF((4, 4)) output = rnn(input_tensor) assert np.all(output.asnumpy() == expect) input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float16)) expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float16) rnn = ResizeNearestNeighborAlignCornerF((4, 4)) output = rnn(input_tensor) assert np.all(output.asnumpy() == expect)
tests/st/ops/gpu/test_resize_nearest_neighbor_op.py
3,145
Copyright 2019 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================
638
en
0.810121
from typing import Dict, Tuple import torch import torch.nn as nn from neuralhydrology.modelzoo.basemodel import BaseModel from neuralhydrology.modelzoo.fc import FC from neuralhydrology.modelzoo.head import get_head from neuralhydrology.utils.config import Config class EALSTM(BaseModel): """Entity-Aware LSTM (EA-LSTM) model class. This model has been proposed by Kratzert et al. [#]_ as a variant of the standard LSTM. The main difference is that the input gate of the EA-LSTM is modulated using only the static inputs, while the dynamic (time series) inputs are used in all other parts of the model (i.e. forget gate, cell update gate and output gate). To control the initial forget gate bias, use the config argument `initial_forget_bias`. Often it is useful to set this value to a positive value at the start of the model training, to keep the forget gate closed and to facilitate the gradient flow. The `EALSTM` class does only support single timescale predictions. Use `MTSLSTM` to train an LSTM-based model and get predictions on multiple temporal resolutions at the same time. Parameters ---------- cfg : Config The run configuration. References ---------- .. [#] Kratzert, F., Klotz, D., Shalev, G., Klambauer, G., Hochreiter, S., and Nearing, G.: Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets, Hydrol. Earth Syst. Sci., 23, 5089–5110, https://doi.org/10.5194/hess-23-5089-2019, 2019. """ # specify submodules of the model that can later be used for finetuning. 
Names must match class attributes module_parts = ['input_gate', 'dynamic_gates', 'head'] def __init__(self, cfg: Config): super(EALSTM, self).__init__(cfg=cfg) self._hidden_size = cfg.hidden_size input_size_stat = len(cfg.static_inputs + cfg.camels_attributes + cfg.hydroatlas_attributes) if cfg.use_basin_id_encoding: input_size_stat += cfg.number_of_basins # If hidden units for a embedding network are specified, create FC, otherwise single linear layer if cfg.embedding_hiddens: self.input_gate = FC(cfg=cfg) else: self.input_gate = nn.Linear(input_size_stat, cfg.hidden_size) # create tensors of learnable parameters self.dynamic_gates = _DynamicGates(cfg=cfg) self.dropout = nn.Dropout(p=cfg.output_dropout) self.head = get_head(cfg=cfg, n_in=cfg.hidden_size, n_out=self.output_size) def _cell(self, x: torch.Tensor, i: torch.Tensor, states: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: """Single time step logic of EA-LSTM cell""" h_0, c_0 = states # calculate gates gates = self.dynamic_gates(h_0, x) f, o, g = gates.chunk(3, 1) c_1 = torch.sigmoid(f) * c_0 + i * torch.tanh(g) h_1 = torch.sigmoid(o) * torch.tanh(c_1) return h_1, c_1 def forward(self, data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Perform a forward pass on the EA-LSTM model. Parameters ---------- data : Dict[str, torch.Tensor] Dictionary, containing input features as key-value pairs. Returns ------- Dict[str, torch.Tensor] Model outputs and intermediate states as a dictionary. - `y_hat`: model predictions of shape [batch size, sequence length, number of target variables]. - `h_n`: hidden state at the last time step of the sequence of shape [batch size, sequence length, number of target variables]. - `c_n`: cell state at the last time step of the sequence of shape [batch size, sequence length, number of target variables]. 
""" # transpose to [seq_length, batch_size, n_features] x_d = data['x_d'].transpose(0, 1) if 'x_s' in data and 'x_one_hot' in data: x_s = torch.cat([data['x_s'], data['x_one_hot']], dim=-1) elif 'x_s' in data: x_s = data['x_s'] elif 'x_one_hot' in data: x_s = data['x_one_hot'] else: raise ValueError('Need x_s or x_one_hot in forward pass.') # TODO: move hidden and cell state initialization to init and only reset states in forward pass to zero. h_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_() c_t = x_d.data.new(x_d.shape[1], self._hidden_size).zero_() # empty lists to temporally store all intermediate hidden/cell states h_n, c_n = [], [] # calculate input gate only once because inputs are static i = torch.sigmoid(self.input_gate(x_s)) # perform forward steps over input sequence for x_dt in x_d: h_t, c_t = self._cell(x_dt, i, (h_t, c_t)) # store intermediate hidden/cell state in list h_n.append(h_t) c_n.append(c_t) h_n = torch.stack(h_n, 0).transpose(0, 1) c_n = torch.stack(c_n, 0).transpose(0, 1) pred = {'h_n': h_n, 'c_n': c_n} pred.update(self.head(self.dropout(h_n))) return pred class _DynamicGates(nn.Module): """Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module""" def __init__(self, cfg: Config): super(_DynamicGates, self).__init__() self.cfg = cfg self.weight_ih = nn.Parameter(torch.FloatTensor(len(cfg.dynamic_inputs), 3 * cfg.hidden_size)) self.weight_hh = nn.Parameter(torch.FloatTensor(cfg.hidden_size, 3 * cfg.hidden_size)) self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size)) # initialize parameters self._reset_parameters() def _reset_parameters(self): """Special initialization of certain model weights.""" nn.init.orthogonal_(self.weight_ih.data) weight_hh_data = torch.eye(self.cfg.hidden_size) weight_hh_data = weight_hh_data.repeat(1, 3) self.weight_hh.data = weight_hh_data nn.init.constant_(self.bias.data, val=0) if self.cfg.initial_forget_bias is not None: self.bias.data[:self.cfg.hidden_size] = 
self.cfg.initial_forget_bias def forward(self, h: torch.Tensor, x_d: torch.Tensor): gates = h @ self.weight_hh + x_d @ self.weight_ih + self.bias return gates
neuralhydrology/modelzoo/ealstm.py
6,511
Entity-Aware LSTM (EA-LSTM) model class. This model has been proposed by Kratzert et al. [#]_ as a variant of the standard LSTM. The main difference is that the input gate of the EA-LSTM is modulated using only the static inputs, while the dynamic (time series) inputs are used in all other parts of the model (i.e. forget gate, cell update gate and output gate). To control the initial forget gate bias, use the config argument `initial_forget_bias`. Often it is useful to set this value to a positive value at the start of the model training, to keep the forget gate closed and to facilitate the gradient flow. The `EALSTM` class does only support single timescale predictions. Use `MTSLSTM` to train an LSTM-based model and get predictions on multiple temporal resolutions at the same time. Parameters ---------- cfg : Config The run configuration. References ---------- .. [#] Kratzert, F., Klotz, D., Shalev, G., Klambauer, G., Hochreiter, S., and Nearing, G.: Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets, Hydrol. Earth Syst. Sci., 23, 5089–5110, https://doi.org/10.5194/hess-23-5089-2019, 2019. Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module Single time step logic of EA-LSTM cell Special initialization of certain model weights. Perform a forward pass on the EA-LSTM model. Parameters ---------- data : Dict[str, torch.Tensor] Dictionary, containing input features as key-value pairs. Returns ------- Dict[str, torch.Tensor] Model outputs and intermediate states as a dictionary. - `y_hat`: model predictions of shape [batch size, sequence length, number of target variables]. - `h_n`: hidden state at the last time step of the sequence of shape [batch size, sequence length, number of target variables]. - `c_n`: cell state at the last time step of the sequence of shape [batch size, sequence length, number of target variables]. 
specify submodules of the model that can later be used for finetuning. Names must match class attributes If hidden units for a embedding network are specified, create FC, otherwise single linear layer create tensors of learnable parameters calculate gates transpose to [seq_length, batch_size, n_features] TODO: move hidden and cell state initialization to init and only reset states in forward pass to zero. empty lists to temporally store all intermediate hidden/cell states calculate input gate only once because inputs are static perform forward steps over input sequence store intermediate hidden/cell state in list initialize parameters
2,684
en
0.769701
import logging from pathlib import Path from taskcat._config import Config from taskcat.iam_policy.policy import CFNPolicyGenerator LOG = logging.getLogger(__name__) class GenerateIAMPolicy: """ [ALPHA] Introspects CFN Template(s) and generates an IAM policy necessary to successfully launch the template(s) """ CLINAME = "generate-iam-policy" def __init__( self, output_file: str = "./cfn_stack_policy.json", project_root: str = "./" ): project_root_path = Path(project_root).expanduser().resolve() config = Config.create(project_root=project_root_path) CFNPolicyGenerator(config, output_file).generate_policy()
taskcat/_cli_modules/generate_iam_policy.py
677
[ALPHA] Introspects CFN Template(s) and generates an IAM policy necessary to successfully launch the template(s)
112
en
0.605976
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import numpy as np import os from functools import partial import logging import time import paddle import paddle.fluid as fluid import argparse import network import reader logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger("fluid") logger.setLevel(logging.INFO) def parse_args(): parser = argparse.ArgumentParser("gnn") parser.add_argument( '--train_path', type=str, default='./data/diginetica/train.txt', help='dir of training data') parser.add_argument( '--config_path', type=str, default='./data/diginetica/config.txt', help='dir of config') parser.add_argument( '--model_path', type=str, default='./saved_model', help="path of model parameters") parser.add_argument( '--epoch_num', type=int, default=30, help='number of epochs to train for') parser.add_argument( '--batch_size', type=int, default=100, help='input batch size') parser.add_argument( '--hidden_size', type=int, default=100, help='hidden state size') parser.add_argument( '--l2', type=float, default=1e-5, help='l2 penalty') parser.add_argument( '--lr', type=float, default=0.001, help='learning rate') parser.add_argument( '--step', type=int, default=1, help='gnn propogation steps') parser.add_argument( '--lr_dc', type=float, default=0.1, help='learning rate decay rate') parser.add_argument( '--lr_dc_step', type=int, default=3, help='the number of steps after which the learning 
rate decay') parser.add_argument( '--use_cuda', type=int, default=0, help='whether to use gpu') parser.add_argument( '--use_parallel', type=int, default=1, help='whether to use parallel executor') parser.add_argument( '--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.') return parser.parse_args() def train(): args = parse_args() if args.enable_ce: SEED = 102 fluid.default_main_program().random_seed = SEED fluid.default_startup_program().random_seed = SEED batch_size = args.batch_size items_num = reader.read_config(args.config_path) loss, acc, py_reader, feed_datas = network.network(items_num, args.hidden_size, args.step) data_reader = reader.Data(args.train_path, True) logger.info("load data complete") use_cuda = True if args.use_cuda else False use_parallel = True if args.use_parallel else False place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) step_per_epoch = data_reader.length // batch_size optimizer = fluid.optimizer.Adam( learning_rate=fluid.layers.exponential_decay( learning_rate=args.lr, decay_steps=step_per_epoch * args.lr_dc_step, decay_rate=args.lr_dc), regularization=fluid.regularizer.L2DecayRegularizer( regularization_coeff=args.l2)) optimizer.minimize(loss) exe.run(fluid.default_startup_program()) all_vocab = fluid.global_scope().var("all_vocab").get_tensor() all_vocab.set( np.arange(1, items_num).astype("int64").reshape((-1, 1)), place) feed_list = [e.name for e in feed_datas] if use_parallel: train_exe = fluid.ParallelExecutor( use_cuda=use_cuda, loss_name=loss.name) else: train_exe = exe logger.info("begin train") total_time = [] ce_info = [] start_time = time.time() loss_sum = 0.0 acc_sum = 0.0 global_step = 0 PRINT_STEP = 500 py_reader.decorate_paddle_reader(data_reader.reader(batch_size, batch_size * 20, True)) for i in range(args.epoch_num): epoch_sum = [] py_reader.start() try: while True: res = train_exe.run(fetch_list=[loss.name, acc.name]) loss_sum += 
res[0].mean() acc_sum += res[1].mean() epoch_sum.append(res[0].mean()) global_step += 1 if global_step % PRINT_STEP == 0: ce_info.append([loss_sum / PRINT_STEP, acc_sum / PRINT_STEP]) total_time.append(time.time() - start_time) logger.info("global_step: %d, loss: %.4lf, train_acc: %.4lf" % ( global_step, loss_sum / PRINT_STEP, acc_sum / PRINT_STEP)) loss_sum = 0.0 acc_sum = 0.0 start_time = time.time() except fluid.core.EOFException: py_reader.reset() logger.info("epoch loss: %.4lf" % (np.mean(epoch_sum))) save_dir = os.path.join(args.model_path, "epoch_" + str(i)) fetch_vars = [loss, acc] fluid.io.save_inference_model(save_dir, feed_list, fetch_vars, exe) logger.info("model saved in " + save_dir) # only for ce if args.enable_ce: gpu_num = get_cards(args) ce_loss = 0 ce_acc = 0 ce_time = 0 try: ce_loss = ce_info[-1][0] ce_acc = ce_info[-1][1] ce_time = total_time[-1] except: print("ce info error") print("kpis\teach_pass_duration_card%s\t%s" % (gpu_num, ce_time)) print("kpis\ttrain_loss_card%s\t%f" % (gpu_num, ce_loss)) print("kpis\ttrain_acc_card%s\t%f" % (gpu_num, ce_acc)) def get_cards(args): num = 0 cards = os.environ.get('CUDA_VISIBLE_DEVICES') num = len(cards.split(",")) return num if __name__ == "__main__": train()
PaddleRec/gnn/train.py
6,254
Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License. only for ce
585
en
0.821144
""" This module provides two implementations for the rod-cutting problem: 1. A naive recursive implementation which has an exponential runtime 2. Two dynamic programming implementations which have quadratic runtime The rod-cutting problem is the problem of finding the maximum possible revenue obtainable from a rod of length ``n`` given a list of prices for each integral piece of the rod. The maximum revenue can thus be obtained by cutting the rod and selling the pieces separately or not cutting it at all if the price of it is the maximum obtainable. """ def naive_cut_rod_recursive(n: int, prices: list): """ Solves the rod-cutting problem via naively without using the benefit of dynamic programming. The results is the same sub-problems are solved several times leading to an exponential runtime Runtime: O(2^n) Arguments ------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Examples -------- >>> naive_cut_rod_recursive(4, [1, 5, 8, 9]) 10 >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 """ _enforce_args(n, prices) if n == 0: return 0 max_revue = float("-inf") for i in range(1, n + 1): max_revue = max( max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices) ) return max_revue def top_down_cut_rod(n: int, prices: list): """ Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. This function serves as a wrapper for _top_down_cut_rod_recursive Runtime: O(n^2) Arguments -------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Note ---- For convenience and because Python's lists using 0-indexing, length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of length 0. 
Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Examples ------- >>> top_down_cut_rod(4, [1, 5, 8, 9]) 10 >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 """ _enforce_args(n, prices) max_rev = [float("-inf") for _ in range(n + 1)] return _top_down_cut_rod_recursive(n, prices, max_rev) def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list): """ Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. Runtime: O(n^2) Arguments -------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` max_rev: list, the computed maximum revenue for a piece of rod. ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. """ if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: max_revenue = float("-inf") for i in range(1, n + 1): max_revenue = max( max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), ) max_rev[n] = max_revenue return max_rev[n] def bottom_up_cut_rod(n: int, prices: list): """ Constructs a bottom-up dynamic programming solution for the rod-cutting problem Runtime: O(n^2) Arguments ---------- n: int, the maximum length of the rod. prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Returns ------- The maximum revenue obtainable from cutting a rod of length n given the prices for each piece of rod p. Examples ------- >>> bottom_up_cut_rod(4, [1, 5, 8, 9]) 10 >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 """ _enforce_args(n, prices) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. 
max_rev = [float("-inf") for _ in range(n + 1)] max_rev[0] = 0 for i in range(1, n + 1): max_revenue_i = max_rev[i] for j in range(1, i + 1): max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j]) max_rev[i] = max_revenue_i return max_rev[n] def _enforce_args(n: int, prices: list): """ Basic checks on the arguments to the rod-cutting algorithms n: int, the length of the rod prices: list, the price list for each piece of rod. Throws ValueError: if n is negative or there are fewer items in the price list than the length of the rod """ if n < 0: raise ValueError(f"n must be greater than or equal to 0. Got n = {n}") if n > len(prices): raise ValueError( "Each integral piece of rod must have a corresponding " f"price. Got n = {n} but length of prices = {len(prices)}" ) def main(): prices = [6, 10, 12, 15, 20, 23] n = len(prices) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. expected_max_revenue = 36 max_rev_top_down = top_down_cut_rod(n, prices) max_rev_bottom_up = bottom_up_cut_rod(n, prices) max_rev_naive = naive_cut_rod_recursive(n, prices) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
dynamic_programming/rod_cutting.py
5,903
Basic checks on the arguments to the rod-cutting algorithms n: int, the length of the rod prices: list, the price list for each piece of rod. Throws ValueError: if n is negative or there are fewer items in the price list than the length of the rod Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. Runtime: O(n^2) Arguments -------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` max_rev: list, the computed maximum revenue for a piece of rod. ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Constructs a bottom-up dynamic programming solution for the rod-cutting problem Runtime: O(n^2) Arguments ---------- n: int, the maximum length of the rod. prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Returns ------- The maximum revenue obtainable from cutting a rod of length n given the prices for each piece of rod p. Examples ------- >>> bottom_up_cut_rod(4, [1, 5, 8, 9]) 10 >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 Solves the rod-cutting problem via naively without using the benefit of dynamic programming. The results is the same sub-problems are solved several times leading to an exponential runtime Runtime: O(2^n) Arguments ------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Examples -------- >>> naive_cut_rod_recursive(4, [1, 5, 8, 9]) 10 >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. 
This function serves as a wrapper for _top_down_cut_rod_recursive Runtime: O(n^2) Arguments -------- n: int, the length of the rod prices: list, the prices for each piece of rod. ``p[i-i]`` is the price for a rod of length ``i`` Note ---- For convenience and because Python's lists using 0-indexing, length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of length 0. Returns ------- The maximum revenue obtainable for a rod of length n given the list of prices for each piece. Examples ------- >>> top_down_cut_rod(4, [1, 5, 8, 9]) 10 >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) 30 This module provides two implementations for the rod-cutting problem: 1. A naive recursive implementation which has an exponential runtime 2. Two dynamic programming implementations which have quadratic runtime The rod-cutting problem is the problem of finding the maximum possible revenue obtainable from a rod of length ``n`` given a list of prices for each integral piece of the rod. The maximum revenue can thus be obtained by cutting the rod and selling the pieces separately or not cutting it at all if the price of it is the maximum obtainable. length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of length 0. the best revenue comes from cutting the rod into 6 pieces, each of length 1 resulting in a revenue of 6 * 6 = 36.
3,355
en
0.805453
# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics # and Energy System Technology (IEE), Kassel. All rights reserved. # Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. from pandapower.plotting.geo import _node_geometries_from_geodata, \ _transform_node_geometry_to_geodata, _branch_geometries_from_geodata, \ _transform_branch_geometry_to_coords, _convert_xy_epsg def convert_gis_to_geodata(net, node_geodata=True, branch_geodata=True): """ Extracts information on bus and line geodata from the geometries of a geopandas geodataframe. :param net: The net for which to convert the geodata :type net: pandapowerNet :param node_geodata: flag if to extract x and y values for bus geodata :type node_geodata: bool, default True :param branch_geodata: flag if to extract coordinates values for line geodata :type branch_geodata: bool, default True :return: No output. """ if node_geodata: _transform_node_geometry_to_geodata(net.junction_geodata) if branch_geodata: _transform_branch_geometry_to_coords(net.pipe_geodata) def convert_geodata_to_gis(net, epsg=31467, node_geodata=True, branch_geodata=True): """ Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective geometries. :param net: The net for which to convert the geodata :type net: pandapowerNet :param epsg: current epsg projection :type epsg: int, default 4326 (= WGS84) :param node_geodata: flag if to transform the bus geodata table :type node_geodata: bool, default True :param branch_geodata: flag if to transform the line geodata table :type branch_geodata: bool, default True :return: No output. 
""" if node_geodata: net["bus_geodata"] = _node_geometries_from_geodata(net["bus_geodata"], epsg) if branch_geodata: net["line_geodata"] = _branch_geometries_from_geodata(net["line_geodata"], epsg) net["gis_epsg_code"] = epsg def convert_epsg_bus_geodata(net, epsg_in=4326, epsg_out=31467): """ Converts bus geodata in net from epsg_in to epsg_out :param net: The pandapower network :type net: pandapowerNet :param epsg_in: current epsg projection :type epsg_in: int, default 4326 (= WGS84) :param epsg_out: epsg projection to be transformed to :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3) :return: net - the given pandapower network (no copy!) """ net['bus_geodata'].loc[:, "x"], net['bus_geodata'].loc[:, "y"] = _convert_xy_epsg( net['bus_geodata'].loc[:, "x"], net['bus_geodata'].loc[:, "y"], epsg_in, epsg_out) return net
pandapipes/plotting/geo.py
2,722
Converts bus geodata in net from epsg_in to epsg_out :param net: The pandapower network :type net: pandapowerNet :param epsg_in: current epsg projection :type epsg_in: int, default 4326 (= WGS84) :param epsg_out: epsg projection to be transformed to :type epsg_out: int, default 31467 (= Gauss-Krüger Zone 3) :return: net - the given pandapower network (no copy!) Transforms the bus and line geodata of a net into a geopandaas geodataframe with the respective geometries. :param net: The net for which to convert the geodata :type net: pandapowerNet :param epsg: current epsg projection :type epsg: int, default 4326 (= WGS84) :param node_geodata: flag if to transform the bus geodata table :type node_geodata: bool, default True :param branch_geodata: flag if to transform the line geodata table :type branch_geodata: bool, default True :return: No output. Extracts information on bus and line geodata from the geometries of a geopandas geodataframe. :param net: The net for which to convert the geodata :type net: pandapowerNet :param node_geodata: flag if to extract x and y values for bus geodata :type node_geodata: bool, default True :param branch_geodata: flag if to extract coordinates values for line geodata :type branch_geodata: bool, default True :return: No output. Copyright (c) 2020 by Fraunhofer Institute for Energy Economics and Energy System Technology (IEE), Kassel. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
1,510
en
0.553763
# -*- coding: UTF-8 -*-
"""AJAX views for the CMDB app.

CRUD endpoints for hosts, host login users, database groups and database
instances.  Every endpoint returns JSON of the shape
``{'status': <1|2|3>, 'msg': <text>, ...}`` where status 1 = success,
2 = failure and 3 = "no record selected".
"""
import datetime
import json

from django.contrib.auth.hashers import check_password, make_password
from django.core import serializers
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt

from cmdb.models import host, hostUser, dbGroup, dbInstance
from utils.jsonExt import DateEncoder
from utils.logUtil import getLogger

logger = getLogger()


def _json_response(result):
    """Serialize *result* to JSON and wrap it in an ``HttpResponse``."""
    return HttpResponse(json.dumps(result), content_type='application/json')


@csrf_exempt
def addChangeHostInfo(request):
    """Create a host (no ``host_id`` posted) or update an existing one."""
    v_hostId = request.POST.get('host_id')
    fields = dict(
        businessName=request.POST.get('business_name'),
        serviceEnv=request.POST.get('service_env'),
        hostName=request.POST.get('host_name'),
        intranetIpAddr=request.POST.get('intranet_ipaddr'),
        publicIpAddr=request.POST.get('public_ipaddr'),
        sshPort=request.POST.get('ssh_port'),
        hostType=request.POST.get('host_type'),
        hostRole=request.POST.get('host_role'),
        hostDesc=request.POST.get('host_desc'),
    )
    logger.debug("addChangeHostInfo host_id=%s fields=%s", v_hostId, fields)
    if not v_hostId:
        # 新增
        try:
            host(**fields).save()
            return _json_response({'status': 1, 'msg': '保存成功!', 'data': ''})
        except Exception as e:
            logger.error(str(e))
            return _json_response({'status': 2, 'msg': '保存失败!' + str(e), 'data': ''})
    # 修改
    try:
        host.objects.filter(id=v_hostId).update(**fields)
        return _json_response({'status': 1, 'msg': '修改成功!', 'data': ''})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '修改失败!' + str(e), 'data': ''})


@csrf_exempt
def getHostDetailInfo(request):
    """Return the detail record of a single host as JSON."""
    hostId = request.POST.get('hostId')
    try:
        hostJson = host.objects.get(id=hostId).toJSON()
        return _json_response({'status': 1, 'msg': '请求成功', 'obj': hostJson})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '请求失败!' + str(e), 'data': ''})


@csrf_exempt
def delHost(request):
    """Delete one host by id; status 3 when nothing was selected."""
    hostId = request.POST.get('hostId')
    if not hostId:
        return _json_response({'status': 3, 'msg': '未选中任何记录!', 'data': ''})
    try:
        delResult = host.objects.filter(id=hostId).delete()
        logger.info("delete host id=%s result=%s", hostId, delResult)
        return _json_response({'status': 1, 'msg': '删除成功!', 'data': delResult})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '删除失败!' + str(e), 'data': ''})


@csrf_exempt
def addChangeHostUserInfo(request):
    """Create (no ``host_user_id``) or update a host login account."""
    v_hostUserId = request.POST.get('host_user_id')
    v_hostId = request.POST.get('host_id')
    v_hostUser = request.POST.get('host_user')
    # NOTE(review): password appears to be stored as plain text — consider
    # make_password()/encryption; confirm against how it is consumed.
    v_hostPasswd = request.POST.get('host_passwd')
    v_userDesc = request.POST.get('user_desc')
    logger.debug("addChangeHostUserInfo host_user_id=%s host_id=%s user=%s",
                 v_hostUserId, v_hostId, v_hostUser)
    if not v_hostUserId:
        # 新增
        try:
            hostObj = host.objects.get(id=v_hostId)
            hostUser(hostUser=v_hostUser, hostPasswd=v_hostPasswd,
                     userDesc=v_userDesc, host=hostObj).save()
            return _json_response({'status': 1, 'msg': '保存成功!', 'data': ''})
        except Exception as e:
            logger.error(str(e))
            return _json_response({'status': 2, 'msg': '保存失败!', 'data': ''})
    # 修改
    try:
        hostUser.objects.filter(id=v_hostUserId).update(
            hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc)
        return _json_response({'status': 1, 'msg': '修改成功!', 'data': ''})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '修改失败!', 'data': ''})


@csrf_exempt
def getHostUserDetailInfo(request):
    """Return a serialized host-user record (with natural foreign keys)."""
    hostUserId = request.POST.get('hostUserId', '').strip()
    try:
        hostUserInfo = hostUser.objects.filter(id=hostUserId)
        hostUserJson = serializers.serialize("json", hostUserInfo,
                                             use_natural_foreign_keys=True)
        return _json_response({'status': 1, 'msg': '请求成功', 'hostUserJson': hostUserJson})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '请求失败!' + str(e), 'data': ''})


@csrf_exempt
def delHostUser(request):
    """Delete one host-user record by id."""
    hostUserId = request.POST.get('hostUserId')
    if not hostUserId:
        return _json_response({'status': 3, 'msg': '未选中任何记录!', 'data': ''})
    try:
        delResult = hostUser.objects.filter(id=hostUserId).delete()
        logger.info("delete hostUser id=%s result=%s", hostUserId, delResult)
        return _json_response({'status': 1, 'msg': '删除成功!', 'data': delResult})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '删除失败!', 'data': ''})


@csrf_exempt
def addChangeDbGroupInfo(request):
    """Create (no ``group_id``) or update a database group."""
    v_groupId = request.POST.get('group_id')
    fields = dict(
        businessName=request.POST.get('business_name'),
        groupName=request.POST.get('group_name'),
        groupEnv=request.POST.get('group_env'),
        groupStatus=request.POST.get('group_status'),
        groupDesc=request.POST.get('group_desc'),
    )
    # lazy %s formatting: the original call passed bare positional args,
    # which makes the logging module raise a formatting error
    logger.info("保存或修改数据库组信息,接收前端参数: group_id=%s fields=%s", v_groupId, fields)
    if not v_groupId:
        # 新增
        try:
            dbGroup(**fields).save()
            return _json_response({'status': 1, 'msg': '保存成功!', 'data': ''})
        except Exception as e:
            logger.error(str(e))
            return _json_response({'status': 2, 'msg': '保存失败!', 'data': ''})
    # 修改
    try:
        dbGroup.objects.filter(id=v_groupId).update(**fields)
        return _json_response({'status': 1, 'msg': '修改成功!', 'data': ''})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '修改失败!', 'data': ''})


@csrf_exempt
def getDbGroupDetailInfo(request):
    """Return the detail record of a single database group as JSON."""
    groupId = request.POST.get('groupId')
    try:
        dbGroupJson = dbGroup.objects.get(id=groupId).toJSON()
        return _json_response({'status': 1, 'msg': '请求成功', 'obj': dbGroupJson})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '请求失败!' + str(e), 'data': ''})


@csrf_exempt
def addChangeDbInstanceInfo(request):
    """Create (no ``instance_id``) or update a database instance."""
    v_instanceId = request.POST.get('instance_id')
    v_groupId = request.POST.get('group_id')
    v_host_id = request.POST.get('host_id')
    # NOTE(review): instanceName is read from POST key 'instance_env' —
    # looks like a front-end naming quirk or a bug; confirm with the form.
    v_instanceName = request.POST.get('instance_env')
    v_instanceType = request.POST.get('instance_type')
    v_portNum = request.POST.get('port_num')
    v_instanceRole = request.POST.get('instance_role')
    v_instanceStatus = request.POST.get('instance_status')
    v_instanceDesc = request.POST.get('instance_desc')
    logger.info("保存或修改数据库实例信息,接收前端参数: %s %s %s %s %s %s %s %s %s",
                v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType,
                v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
    if not v_instanceId:
        # 新增
        try:
            dbGroupObj = dbGroup.objects.get(id=v_groupId)
            hostObj = host.objects.get(id=v_host_id)
            dbInstance(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName,
                       instanceType=v_instanceType, portNum=v_portNum,
                       instanceRole=v_instanceRole, instanceStatus=v_instanceStatus,
                       instanceDesc=v_instanceDesc).save()
            return _json_response({'status': 1, 'msg': '保存成功!', 'data': ''})
        except Exception as e:
            logger.error(str(e))
            return _json_response({'status': 2, 'msg': '保存失败!', 'data': ''})
    # 修改
    try:
        dbGroupObj = dbGroup.objects.get(id=v_groupId)
        hostObj = host.objects.get(id=v_host_id)
        dbInstance.objects.filter(id=v_instanceId).update(
            groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName,
            instanceType=v_instanceType, portNum=v_portNum,
            instanceRole=v_instanceRole, instanceStatus=v_instanceStatus,
            instanceDesc=v_instanceDesc)
        return _json_response({'status': 1, 'msg': '修改成功!', 'data': ''})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '修改失败!', 'data': ''})


@csrf_exempt
def getDbInstanceDetailInfo(request):
    """Return a serialized db-instance record (with natural foreign keys)."""
    instanceId = request.POST.get('instanceId', '').strip()
    try:
        dbInstanceInfo = dbInstance.objects.filter(id=instanceId)
        dbInstanceJson = serializers.serialize("json", dbInstanceInfo,
                                               use_natural_foreign_keys=True)
        return _json_response({'status': 1, 'msg': '请求成功', 'dbInstanceJson': dbInstanceJson})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '请求失败!' + str(e), 'data': ''})


@csrf_exempt
def delDbInstance(request):
    """Delete one database instance by id."""
    instanceId = request.POST.get('instanceId')
    if not instanceId:
        return _json_response({'status': 3, 'msg': '未选中任何记录!', 'data': ''})
    try:
        delResult = dbInstance.objects.filter(id=instanceId).delete()
        logger.info("delete dbInstance id=%s result=%s", instanceId, delResult)
        return _json_response({'status': 1, 'msg': '删除成功!', 'data': delResult})
    except Exception as e:
        logger.error(str(e))
        return _json_response({'status': 2, 'msg': '删除失败!', 'data': ''})
cmdb/views_ajax.py
19,378
新增数据库组 修改数据库组 新增数据库实例 修改数据库实例 新增主机 修改主机 新增主机用户 修改主机用户 -*- coding: UTF-8 -*- from cmdb.models import dbCluster 新增 修改 masterConfigObj.save() 新增 修改 masterConfigObj.save() 新增 修改 masterConfigObj.save() @csrf_exempt def getDbClusterDetailInfo(request): clusterId = request.POST['clusterId'] try: dbClusterObj = dbCluster.objects.get(id=clusterId) dbClusterJson = dbClusterObj.toJSON() result = {'status':1, 'msg':'请求成功', 'obj':dbClusterJson} print(result) return HttpResponse(json.dumps(result), content_type='application/json') except Exception as e: print(e) result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') 新增 修改 masterConfigObj.save() conn = connection.cursor() try: conn.execute('SELECT cdi.*, ch.host_name, ch.intranet_ip_addr, cdg.group_name FROM cmdb_db_instance cdi inner join cmdb_host ch on cdi.host = ch.id inner join cmdb_db_group cdg on cdi.db_group = cdg.id WHERE cdi.id = %s', [instanceId]) dbInstanceInfo = conn.fetchall() print(dbInstanceInfo) dbInstanceJson = serializers.serialize("json", dbInstanceInfo) result = {'status':1, 'msg':'请求成功', 'dbInstanceInfo':dbInstanceInfo} print(result) return HttpResponse(json.dumps(result, cls=DateEncoder), content_type='application/json') except Exception as e: print(e) result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') finally: conn.close() try: dbInstanceInfo = dbInstance.objects.raw('SELECT * FROM cmdb_db_instance WHERE id = %d', [instanceId]) dbInstanceJson = serializers.serialize("json", dbInstanceInfo) print(dbInstanceJson[0].fields.host) print(type(dbInstanceJson[0].fields.host)) hostInfo = host.objects.raw('SELECT * FROM cmdb_host WHERE id = %d', [int(dbInstanceJson[0].fields.host)]) hostJson = serializers.serialize("json", hostInfo) print(hostJson) result = {'status':1, 'msg':'请求成功', 'dbInstanceJson':dbInstanceJson} print(result) return HttpResponse(json.dumps(result), 
content_type='application/json') except Exception as e: print(e) result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') @csrf_exempt def addChangeDbClusterInfo(request): ''' 新增集群信息 修改集群信息 ''' v_clusterId = request.POST.get('cluster_id') v_clusterName = request.POST.get('cluster_name') v_clusterStatus = request.POST.get('cluster_status') v_clusterDesc = request.POST.get('cluster_desc') print("begin add Cluster: ", v_clusterId, v_clusterName, v_clusterStatus, v_clusterDesc) if v_clusterId == '' or v_clusterId is None: 新增 try: dbClusterObj = dbCluster(clusterName=v_clusterName, clusterStatus=v_clusterStatus, clusterDesc=v_clusterDesc) dbClusterObj.save() result = {'status':1, 'msg':'保存成功!', 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') except Exception as e: logger.error(str(e)) result = {'status':2, 'msg':'保存失败!', 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') else: 修改 try: dbClusterObj = dbCluster.objects.filter(id=v_clusterId) dbClusterObj.update(clusterName=v_clusterName, clusterStatus=v_clusterStatus, clusterDesc=v_clusterDesc) masterConfigObj.save() result = {'status':1, 'msg':'修改成功!', 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') except Exception as e: logger.error(str(e)) result = {'status':2, 'msg':'修改失败!', 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') @csrf_exempt def delDbCluster(request): v_clusterId = request.POST['cluster_id'] if v_clusterId == "" or v_clusterId is None: result = {'status':3, 'msg':'未选中任何记录!', 'data':''} return HttpResponse(json.dumps(result), content_type='application/json') else: try: delResult = dbCluster.objects.filter(id=v_clusterId).delete() print(delResult) logger.info(delResult) result = {'status':1, 'msg':'删除成功!', 'data':delResult} return HttpResponse(json.dumps(result), content_type='application/json') 
except Exception as e: print(e) logger.error(e) result = {'status':2, 'msg':'删除失败!', 'data':''} return HttpResponse(json.dumps(result), content_type='application/json')
5,069
en
0.237085
###           ###
#   Imports   #
###           ###

# Python 2/3 compatibility shim around plistlib: re-exports load/loads/
# dump/dumps (and the deprecated readPlist/writePlist) with monkey patches
# that add hex-integer support and preserve key order on Python 2.
import datetime, os, plistlib, struct, sys, itertools
from io import BytesIO
if sys.version_info < (3,0):
    # Force use of StringIO instead of cStringIO as the latter
    # has issues with Unicode strings
    from StringIO import StringIO
try:
    # plistlib gained FMT_XML/FMT_BINARY in Python 3.4; fall back to
    # sentinel strings on older interpreters.
    # NOTE(review): bare except — should at least be `except AttributeError:`
    FMT_XML = plistlib.FMT_XML
    FMT_BINARY = plistlib.FMT_BINARY
except:
    FMT_XML = "FMT_XML"
    FMT_BINARY = "FMT_BINARY"

###                  ###
#   Helper Methods   #
###                  ###

def _check_py3():
    """Return True when running on Python 3, False on Python 2."""
    return True if sys.version_info >= (3, 0) else False

def _is_binary(fp):
    """Return True if *fp* (string data or a file object) is a binary plist.

    Only reached on the Python 2 code path of load(); the str branch would
    raise on Py3 where str.startswith(bytes) is a TypeError.
    """
    if isinstance(fp, _get_inst()):
        return fp.startswith(b"bplist00")
    # File object: peek at the 8-byte magic, then rewind.
    header = fp.read(32)
    fp.seek(0)
    return header[:8] == b'bplist00'

def _get_inst():
    """Return the tuple of string types for the running interpreter."""
    if _check_py3():
        return (str)
    else:
        return (str, unicode)

###                                  ###
#   Deprecated Functions - Remapped   #
###                                  ###

def readPlist(pathOrFile):
    """Deprecated plistlib API: read a plist from a path or open file."""
    if not isinstance(pathOrFile, _get_inst()):
        return load(pathOrFile)
    # It's a path - open binary so both XML and binary plists parse.
    with open(pathOrFile, "rb") as f:
        return load(f)

def writePlist(value, pathOrFile):
    """Deprecated plistlib API: write *value* as XML plist to a path or file."""
    if not isinstance(pathOrFile, _get_inst()):
        return dump(value, pathOrFile, fmt=FMT_XML, sort_keys=True, skipkeys=False)
    with open(pathOrFile, "wb") as f:
        return dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False)

###                       ###
#   Remapped Functions    #
###                       ###

def load(fp, fmt=None, use_builtin_types=None, dict_type=dict):
    """Parse a plist from file object *fp*.

    :param fmt: FMT_XML, FMT_BINARY or None to auto-detect from the header.
    :param use_builtin_types: defaults to True on Py3, False on Py2
        (NOTE: `== None` comparisons kept as-is; `is None` would be idiomatic).
    :param dict_type: mapping class used for plist dicts (e.g. OrderedDict).
    """
    if _check_py3():
        use_builtin_types = True if use_builtin_types == None else use_builtin_types
        # We need to monkey patch this to allow for hex integers - code taken/modified from
        # https://github.com/python/cpython/blob/3.8/Lib/plistlib.py
        if fmt is None:
            # Auto-detect: try each registered format's header matcher.
            header = fp.read(32)
            fp.seek(0)
            for info in plistlib._FORMATS.values():
                if info['detect'](header):
                    P = info['parser']
                    break
            else:
                raise plistlib.InvalidFileException()
        else:
            P = plistlib._FORMATS[fmt]['parser']
        p = P(use_builtin_types=use_builtin_types, dict_type=dict_type)
        if isinstance(p,plistlib._PlistParser):
            # Monkey patch! Accept "0x..." hex integers in <integer> elements.
            def end_integer():
                d = p.get_data()
                p.add_object(int(d,16) if d.lower().startswith("0x") else int(d))
            p.end_integer = end_integer
        return p.parse(fp)
    elif not _is_binary(fp):
        # Python 2, XML input.
        # Is not binary - assume a string - and try to load
        # We avoid using readPlistFromString() as that uses
        # cStringIO and fails when Unicode strings are detected
        # Don't subclass - keep the parser local
        from xml.parsers.expat import ParserCreate
        # Create a new PlistParser object - then we need to set up
        # the values and parse.
        p = plistlib.PlistParser()
        # We also need to monkey patch this to allow for other dict_types
        def begin_dict(attrs):
            d = dict_type()
            p.addObject(d)
            p.stack.append(d)
        # Same hex-integer patch as the Py3 branch, Py2 method names.
        def end_integer():
            d = p.getData()
            p.addObject(int(d,16) if d.lower().startswith("0x") else int(d))
        p.begin_dict = begin_dict
        p.end_integer = end_integer
        parser = ParserCreate()
        parser.StartElementHandler = p.handleBeginElement
        parser.EndElementHandler = p.handleEndElement
        parser.CharacterDataHandler = p.handleData
        if isinstance(fp, unicode):
            # Encode unicode -> string; use utf-8 for safety
            fp = fp.encode("utf-8")
        if isinstance(fp,_get_inst()):
            # It's a string - let's wrap it up
            fp = StringIO(fp)
        # Parse it
        parser.ParseFile(fp)
        return p.root
    else:
        # Python 2, binary input: use the backported binary parser below.
        use_builtin_types = False if use_builtin_types == None else use_builtin_types
        p = _BinaryPlistParser(use_builtin_types=use_builtin_types, dict_type=dict_type)
        return p.parse(fp)

def loads(value, fmt=None, use_builtin_types=None, dict_type=dict):
    """Parse a plist from a bytes or str *value* (str is utf-8 encoded first)."""
    if _check_py3() and isinstance(value, _get_inst()):
        # If it's a string - encode it
        value = value.encode()
    return load(BytesIO(value),fmt=fmt,use_builtin_types=use_builtin_types,dict_type=dict_type)

def dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False):
    """Write *value* to file object *fp* in the given plist format.

    On Py3 this is a straight delegation to plistlib.dump; on Py2 the XML
    writer is monkey-patched so that sort_keys=False preserves dict order.
    """
    if _check_py3():
        plistlib.dump(value, fp, fmt=fmt, sort_keys=sort_keys, skipkeys=skipkeys)
    else:
        if fmt == FMT_XML:
            # We need to monkey patch a bunch here too in order to avoid auto-sorting
            # of keys
            writer = plistlib.PlistWriter(fp)
            def writeDict(d):
                if d:
                    writer.beginElement("dict")
                    # Honor sort_keys instead of plistlib's unconditional sort.
                    items = sorted(d.items()) if sort_keys else d.items()
                    for key, value in items:
                        if not isinstance(key, (str,unicode)):
                            if skipkeys:
                                continue
                            raise TypeError("keys must be strings")
                        writer.simpleElement("key", key)
                        writer.writeValue(value)
                    writer.endElement("dict")
                else:
                    writer.simpleElement("dict")
            writer.writeDict = writeDict
            writer.writeln("<plist version=\"1.0\">")
            writer.writeValue(value)
            writer.writeln("</plist>")
        elif fmt == FMT_BINARY:
            # Assume binary at this point
            writer = _BinaryPlistWriter(fp, sort_keys=sort_keys, skipkeys=skipkeys)
            writer.write(value)
        else:
            # Not a proper format
            raise ValueError("Unsupported format: {}".format(fmt))

def dumps(value, fmt=FMT_XML, skipkeys=False, sort_keys=True):
    """Return *value* serialized as a plist string (unicode on Py3)."""
    if _check_py3():
        return plistlib.dumps(value, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys).decode("utf-8")
    else:
        # We avoid using writePlistToString() as that uses
        # cStringIO and fails when Unicode strings are detected
        f = StringIO()
        dump(value, f, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
        return f.getvalue()

###                                 ###
#   Binary Plist Stuff For Py2      #
###                                 ###

# From the python 3 plistlib.py source: https://github.com/python/cpython/blob/3.7/Lib/plistlib.py
# Tweaked to function on Python 2

class InvalidFileException (ValueError):
    """Raised when the input is not a well-formed plist."""
    def __init__(self, message="Invalid file"):
        ValueError.__init__(self, message)

# struct format codes for unsigned ints of 1/2/4/8 bytes
_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}

# sentinel marking object-table slots that have not been parsed yet
_undefined = object()

class _BinaryPlistParser:
    """
    Read or write a binary plist file, following the description of the binary
    format.  Raise InvalidFileException in case of error, otherwise return the
    root object.

    see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
    """
    def __init__(self, use_builtin_types, dict_type):
        self._use_builtin_types = use_builtin_types
        self._dict_type = dict_type

    def parse(self, fp):
        try:
            # The basic file format:
            # HEADER
            # object...
            # refid->offset...
# TRAILER self._fp = fp self._fp.seek(-32, os.SEEK_END) trailer = self._fp.read(32) if len(trailer) != 32: raise InvalidFileException() ( offset_size, self._ref_size, num_objects, top_object, offset_table_offset ) = struct.unpack('>6xBBQQQ', trailer) self._fp.seek(offset_table_offset) self._object_offsets = self._read_ints(num_objects, offset_size) self._objects = [_undefined] * num_objects return self._read_object(top_object) except (OSError, IndexError, struct.error, OverflowError, UnicodeDecodeError): raise InvalidFileException() def _get_size(self, tokenL): """ return the size of the next object.""" if tokenL == 0xF: m = ord(self._fp.read(1)[0]) & 0x3 s = 1 << m f = '>' + _BINARY_FORMAT[s] return struct.unpack(f, self._fp.read(s))[0] return tokenL def _read_ints(self, n, size): data = self._fp.read(size * n) if size in _BINARY_FORMAT: return struct.unpack('>' + _BINARY_FORMAT[size] * n, data) else: if not size or len(data) != size * n: raise InvalidFileException() return tuple(int.from_bytes(data[i: i + size], 'big') for i in range(0, size * n, size)) def _read_refs(self, n): return self._read_ints(n, self._ref_size) def _read_object(self, ref): """ read the object by reference. May recursively read sub-objects (content of an array/dict/set) """ result = self._objects[ref] if result is not _undefined: return result offset = self._object_offsets[ref] self._fp.seek(offset) token = ord(self._fp.read(1)[0]) tokenH, tokenL = token & 0xF0, token & 0x0F if token == 0: # \x00 or 0x00 result = None elif token == 8: # \x08 or 0x08 result = False elif token == 9: # \x09 or 0x09 result = True # The referenced source code also mentions URL (0x0c, 0x0d) and # UUID (0x0e), but neither can be generated using the Cocoa libraries. 
elif token == 15: # \x0f or 0x0f result = b'' elif tokenH == 0x10: # int result = 0 for k in xrange((2 << tokenL) - 1): result = (result << 8) + ord(self._fp.read(1)) # result = int.from_bytes(self._fp.read(1 << tokenL), # 'big', signed=tokenL >= 3) elif token == 0x22: # real result = struct.unpack('>f', self._fp.read(4))[0] elif token == 0x23: # real result = struct.unpack('>d', self._fp.read(8))[0] elif token == 0x33: # date f = struct.unpack('>d', self._fp.read(8))[0] # timestamp 0 of binary plists corresponds to 1/1/2001 # (year of Mac OS X 10.0), instead of 1/1/1970. result = (datetime.datetime(2001, 1, 1) + datetime.timedelta(seconds=f)) elif tokenH == 0x40: # data s = self._get_size(tokenL) if self._use_builtin_types: result = self._fp.read(s) else: result = plistlib.Data(self._fp.read(s)) elif tokenH == 0x50: # ascii string s = self._get_size(tokenL) result = self._fp.read(s).decode('ascii') result = result elif tokenH == 0x60: # unicode string s = self._get_size(tokenL) result = self._fp.read(s * 2).decode('utf-16be') # tokenH == 0x80 is documented as 'UID' and appears to be used for # keyed-archiving, not in plists. elif tokenH == 0xA0: # array s = self._get_size(tokenL) obj_refs = self._read_refs(s) result = [] self._objects[ref] = result result.extend(self._read_object(x) for x in obj_refs) # tokenH == 0xB0 is documented as 'ordset', but is not actually # implemented in the Apple reference code. # tokenH == 0xC0 is documented as 'set', but sets cannot be used in # plists. 
elif tokenH == 0xD0: # dict s = self._get_size(tokenL) key_refs = self._read_refs(s) obj_refs = self._read_refs(s) result = self._dict_type() self._objects[ref] = result for k, o in zip(key_refs, obj_refs): key = self._read_object(k) if isinstance(key, plistlib.Data): key = key.data result[key] = self._read_object(o) else: raise InvalidFileException() self._objects[ref] = result return result def _count_to_size(count): if count < 1 << 8: return 1 elif count < 1 << 16: return 2 elif count << 1 << 32: return 4 else: return 8 _scalars = (str, int, float, datetime.datetime, bytes) class _BinaryPlistWriter (object): def __init__(self, fp, sort_keys, skipkeys): self._fp = fp self._sort_keys = sort_keys self._skipkeys = skipkeys def write(self, value): # Flattened object list: self._objlist = [] # Mappings from object->objectid # First dict has (type(object), object) as the key, # second dict is used when object is not hashable and # has id(object) as the key. self._objtable = {} self._objidtable = {} # Create list of all objects in the plist self._flatten(value) # Size of object references in serialized containers # depends on the number of objects in the plist. 
num_objects = len(self._objlist) self._object_offsets = [0]*num_objects self._ref_size = _count_to_size(num_objects) self._ref_format = _BINARY_FORMAT[self._ref_size] # Write file header self._fp.write(b'bplist00') # Write object list for obj in self._objlist: self._write_object(obj) # Write refnum->object offset table top_object = self._getrefnum(value) offset_table_offset = self._fp.tell() offset_size = _count_to_size(offset_table_offset) offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects self._fp.write(struct.pack(offset_format, *self._object_offsets)) # Write trailer sort_version = 0 trailer = ( sort_version, offset_size, self._ref_size, num_objects, top_object, offset_table_offset ) self._fp.write(struct.pack('>5xBBBQQQ', *trailer)) def _flatten(self, value): # First check if the object is in the object table, not used for # containers to ensure that two subcontainers with the same contents # will be serialized as distinct values. if isinstance(value, _scalars): if (type(value), value) in self._objtable: return elif isinstance(value, plistlib.Data): if (type(value.data), value.data) in self._objtable: return elif id(value) in self._objidtable: return # Add to objectreference map refnum = len(self._objlist) self._objlist.append(value) if isinstance(value, _scalars): self._objtable[(type(value), value)] = refnum elif isinstance(value, plistlib.Data): self._objtable[(type(value.data), value.data)] = refnum else: self._objidtable[id(value)] = refnum # And finally recurse into containers if isinstance(value, dict): keys = [] values = [] items = value.items() if self._sort_keys: items = sorted(items) for k, v in items: if not isinstance(k, (str,unicode)): if self._skipkeys: continue raise TypeError("keys must be strings") keys.append(k) values.append(v) for o in itertools.chain(keys, values): self._flatten(o) elif isinstance(value, (list, tuple)): for o in value: self._flatten(o) def _getrefnum(self, value): if isinstance(value, _scalars): return 
self._objtable[(type(value), value)] elif isinstance(value, plistlib.Data): return self._objtable[(type(value.data), value.data)] else: return self._objidtable[id(value)] def _write_size(self, token, size): if size < 15: self._fp.write(struct.pack('>B', token | size)) elif size < 1 << 8: self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size)) elif size < 1 << 16: self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size)) elif size < 1 << 32: self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size)) else: self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size)) def _write_object(self, value): ref = self._getrefnum(value) self._object_offsets[ref] = self._fp.tell() if value is None: self._fp.write(b'\x00') elif value is False: self._fp.write(b'\x08') elif value is True: self._fp.write(b'\x09') elif isinstance(value, int): if value < 0: try: self._fp.write(struct.pack('>Bq', 0x13, value)) except struct.error: raise OverflowError(value) # from None elif value < 1 << 8: self._fp.write(struct.pack('>BB', 0x10, value)) elif value < 1 << 16: self._fp.write(struct.pack('>BH', 0x11, value)) elif value < 1 << 32: self._fp.write(struct.pack('>BL', 0x12, value)) elif value < 1 << 63: self._fp.write(struct.pack('>BQ', 0x13, value)) elif value < 1 << 64: self._fp.write(b'\x14' + value.to_bytes(16, 'big', signed=True)) else: raise OverflowError(value) elif isinstance(value, float): self._fp.write(struct.pack('>Bd', 0x23, value)) elif isinstance(value, datetime.datetime): f = (value - datetime.datetime(2001, 1, 1)).total_seconds() self._fp.write(struct.pack('>Bd', 0x33, f)) elif isinstance(value, plistlib.Data): self._write_size(0x40, len(value.data)) self._fp.write(value.data) elif isinstance(value, (str,unicode)): try: t = value.encode('ascii') self._write_size(0x50, len(value)) except UnicodeEncodeError: t = value.encode('utf-16be') self._write_size(0x60, len(t) // 2) self._fp.write(t) elif isinstance(value, (bytes, bytearray)): self._write_size(0x40, len(value)) 
self._fp.write(value) elif isinstance(value, (list, tuple)): refs = [self._getrefnum(o) for o in value] s = len(refs) self._write_size(0xA0, s) self._fp.write(struct.pack('>' + self._ref_format * s, *refs)) elif isinstance(value, dict): keyRefs, valRefs = [], [] if self._sort_keys: rootItems = sorted(value.items()) else: rootItems = value.items() for k, v in rootItems: if not isinstance(k, (str,unicode)): if self._skipkeys: continue raise TypeError("keys must be strings") keyRefs.append(self._getrefnum(k)) valRefs.append(self._getrefnum(v)) s = len(keyRefs) self._write_size(0xD0, s) self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs)) self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs)) else: raise TypeError(value)
Scripts/plist.py
19,762
Read or write a binary plist file, following the description of the binary format. Raise InvalidFileException in case of error, otherwise return the root object. see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c return the size of the next object. read the object by reference. May recursively read sub-objects (content of an array/dict/set) Imports Force use of StringIO instead of cStringIO as the latter has issues with Unicode strings Helper Methods Deprecated Functions - Remapped Remapped Functions We need to monkey patch this to allow for hex integers - code taken/modified from https://github.com/python/cpython/blob/3.8/Lib/plistlib.py Monkey patch! Is not binary - assume a string - and try to load We avoid using readPlistFromString() as that uses cStringIO and fails when Unicode strings are detected Don't subclass - keep the parser local Create a new PlistParser object - then we need to set up the values and parse. We also need to monkey patch this to allow for other dict_types Encode unicode -> string; use utf-8 for safety It's a string - let's wrap it up Parse it If it's a string - encode it We need to monkey patch a bunch here too in order to avoid auto-sorting of keys Assume binary at this point Not a proper format We avoid using writePlistToString() as that uses cStringIO and fails when Unicode strings are detected Binary Plist Stuff For Py2 From the python 3 plistlib.py source: https://github.com/python/cpython/blob/3.7/Lib/plistlib.py Tweaked to function on Python 2 The basic file format: HEADER object... refid->offset... TRAILER \x00 or 0x00 \x08 or 0x08 \x09 or 0x09 The referenced source code also mentions URL (0x0c, 0x0d) and UUID (0x0e), but neither can be generated using the Cocoa libraries. \x0f or 0x0f int result = int.from_bytes(self._fp.read(1 << tokenL), 'big', signed=tokenL >= 3) real real date timestamp 0 of binary plists corresponds to 1/1/2001 (year of Mac OS X 10.0), instead of 1/1/1970. 
data ascii string unicode string tokenH == 0x80 is documented as 'UID' and appears to be used for keyed-archiving, not in plists. array tokenH == 0xB0 is documented as 'ordset', but is not actually implemented in the Apple reference code. tokenH == 0xC0 is documented as 'set', but sets cannot be used in plists. dict Flattened object list: Mappings from object->objectid First dict has (type(object), object) as the key, second dict is used when object is not hashable and has id(object) as the key. Create list of all objects in the plist Size of object references in serialized containers depends on the number of objects in the plist. Write file header Write object list Write refnum->object offset table Write trailer First check if the object is in the object table, not used for containers to ensure that two subcontainers with the same contents will be serialized as distinct values. Add to objectreference map And finally recurse into containers from None
3,139
en
0.805817
""" Utils for AiiDA. ---------------- Utilities for making working against AiiDA a bit easier. Mostly here due to historical reasons when AiiDA was rapidly developed. In the future most routines that have now standardized in AiiDA will be removed. """ # pylint: disable=import-outside-toplevel import numpy as np from packaging import version from aiida.orm import User from aiida.cmdline.utils.decorators import with_dbenv BASIC_DATA_TYPES = ['bool', 'float', 'int', 'list', 'str', 'dict'] def get_data_node(data_type, *args, **kwargs): return get_data_class(data_type)(*args, **kwargs) def querybuild(cls, **kwargs): """ Instantiates and returns a QueryBuilder instance. The QueryBuilder's path has one vertice so far, namely this class. Additional parameters (e.g. filters or a label), can be passes as keyword arguments. :param label: Label to give :param filters: filters to apply :param project: projections :returns: a QueryBuilder instance. """ from aiida.orm import QueryBuilder query_builder = QueryBuilder() filters = kwargs.pop('filters', {}) query_builder.append(cls, filters=filters, **kwargs) return query_builder @with_dbenv() def get_data_class(data_type): """Provide access to the orm.data classes with deferred dbenv loading.""" from aiida.plugins import DataFactory from aiida.common.exceptions import MissingEntryPointError data_cls = None try: data_cls = DataFactory(data_type) except MissingEntryPointError as err: raise err return data_cls def get_current_user(): """Get current user.""" current_user = User.objects.get_default() return current_user def copy_parameter(old_parameter): """Assemble a new Dict.""" return get_data_node('dict', dict=old_parameter.get_dict()) def displaced_structure(structure, displacement, entry): disp_structure = structure.clone() displace_position(disp_structure, displacement, entry) return disp_structure def compressed_structure(structure, volume_change): comp_structure = structure.clone() compress_cell(comp_structure, volume_change) 
return comp_structure def displace_position(structure, displacement, entry): """Displace a position in the StructureData.""" sites = structure.sites positions = [] for site in sites: positions.append(site.position) new_position = np.asarray(positions[entry - 1]) + displacement new_position = new_position.tolist() positions[entry - 1] = tuple(new_position) structure.reset_sites_positions(positions) def compress_cell(structure, volume_change): """Apply compression or tensile forces to the unit cell.""" cell = structure.cell new_cell = np.array(cell) * volume_change structure.reset_cell(new_cell.tolist()) def aiida_version(): from aiida import __version__ as aiida_version_ return version.parse(aiida_version_) def cmp_version(string): return version.parse(string) def cmp_load_verdi_data(): """Load the verdi data click command group for any version since 0.11.""" verdi_data = None import_errors = [] try: from aiida.cmdline.commands import data_cmd as verdi_data except ImportError as err: import_errors.append(err) if not verdi_data: try: from aiida.cmdline.commands import verdi_data except ImportError as err: import_errors.append(err) if not verdi_data: try: from aiida.cmdline.commands.cmd_data import verdi_data except ImportError as err: import_errors.append(err) if not verdi_data: err_messages = '\n'.join([' * {}'.format(err) for err in import_errors]) raise ImportError('The verdi data base command group could not be found:\n' + err_messages) return verdi_data def create_authinfo(computer, store=False): """Allow the current user to use the given computer.""" from aiida.orm import AuthInfo authinfo = AuthInfo(computer=computer, user=get_current_user()) if store: authinfo.store() return authinfo def cmp_get_authinfo(computer): """Get an existing authinfo or None for the given computer and current user.""" return computer.get_authinfo(get_current_user()) def cmp_get_transport(computer): if hasattr(computer, 'get_transport'): return computer.get_transport() authinfo = 
cmp_get_authinfo(computer) return authinfo.get_transport()
aiida_vasp/utils/aiida_utils.py
4,520
Get an existing authinfo or None for the given computer and current user. Load the verdi data click command group for any version since 0.11. Apply compression or tensile forces to the unit cell. Assemble a new Dict. Allow the current user to use the given computer. Displace a position in the StructureData. Get current user. Provide access to the orm.data classes with deferred dbenv loading. Instantiates and returns a QueryBuilder instance. The QueryBuilder's path has one vertice so far, namely this class. Additional parameters (e.g. filters or a label), can be passes as keyword arguments. :param label: Label to give :param filters: filters to apply :param project: projections :returns: a QueryBuilder instance. Utils for AiiDA. ---------------- Utilities for making working against AiiDA a bit easier. Mostly here due to historical reasons when AiiDA was rapidly developed. In the future most routines that have now standardized in AiiDA will be removed. pylint: disable=import-outside-toplevel
1,009
en
0.824488
# Fully Written by @HeisenbergTheDanger (Keep credits else gay)
# Permission Seeked By @StarkXD - Approved
"""Broadcast plugin: forward or re-send a replied-to message to every channel
stored in the broadcast database, plus commands to manage that database."""

import asyncio
import datetime
import io  # BUG FIX: io.BytesIO is used below but was never imported

from telethon import events
from var import Var
from uniborg.util import admin_cmd
from telethon.tl.types import (
    DocumentAttributeFilename,
    DocumentAttributeSticker,
    InputMediaUploadedDocument,
    InputMediaUploadedPhoto,
    InputPeerNotifySettings,
    InputStickerSetID,
    InputStickerSetShortName,
    MessageMediaPhoto
)
from userbot.plugins.sql_helper.broadcast_sql import in_channels, add_channel, rm_channel, get_all_channels

# Chat used for error reporting.
logs_id = Var.PRIVATE_GROUP_ID

# Telegram's error text for an empty message; treated as fatal for a broadcast.
_EMPTY_MESSAGE_ERROR = "The message cannot be empty unless a file is provided"


async def _report_send_error(event, chat_id, error):
    """Report a per-channel send failure to the log group.

    Returns True when the error is fatal for the whole broadcast (the caller
    should stop), False when the caller should count it and continue.
    """
    try:
        await borg.send_message(logs_id, f"Error in sending at {chat_id}.")
        await borg.send_message(logs_id, "Error! " + str(error))
        # BUG FIX: the original compared the Exception object itself to a
        # string (always False); compare its text instead.
        if str(error) == _EMPTY_MESSAGE_ERROR:
            await event.edit("For sending files, upload in Saved Messages and reply .forward to in.")
            return True
    except Exception:
        # Logging must never break the broadcast loop (e.g. no log group set).
        pass
    return False


@borg.on(admin_cmd("bforward ?(.*)", allow_sudo=True))
async def forw(event):
    """Forward the replied-to message verbatim to every channel in the database."""
    if event.fwd_from:
        return
    if not event.is_reply:
        await event.edit("Reply to a message to broadcast.")
        return
    channels = get_all_channels()
    await event.edit("Sending...")
    error_count = 0
    sent_count = 0
    if event.reply_to_msg_id:
        previous_message = await event.get_reply_message()
        for channel in channels:
            try:
                await borg.forward_messages(int(channel.chat_id), previous_message)
                sent_count += 1
                await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}")
            except Exception as error:
                if await _report_send_error(event, channel.chat_id, error):
                    return
                error_count += 1
                await event.edit(f"Sent : {sent_count}\nError : {error_count}")
        await event.edit(f"{sent_count} messages sent with {error_count} errors.")
        if error_count > 0:
            try:
                await borg.send_message(logs_id, f"{error_count} Errors")
            except Exception:
                await event.edit("Set up log channel for checking errors.")


@borg.on(admin_cmd("broadcast ?(.*)", allow_sudo=True))
async def _(event):
    """Re-send the replied-to message (plain text or photo) to every channel."""
    if event.fwd_from:
        return
    if not event.is_reply:
        await event.edit("Reply to a message to broadcast.")
        return
    channels = get_all_channels()
    error_count = 0
    sent_count = 0
    await event.edit("Sending....")
    if event.reply_to_msg_id:
        previous_message = await event.get_reply_message()
        if previous_message.sticker or previous_message.poll:
            await event.edit("Reply .forward for stickers and polls.")
            return
        if (previous_message.gif or previous_message.audio or previous_message.voice
                or previous_message.video or previous_message.video_note
                or previous_message.contact or previous_message.game
                or previous_message.geo or previous_message.invoice):
            # Written by @HeisenbergTheDanger
            await event.edit("Not supported. Try .forward")
            return
        if not previous_message.web_preview and previous_message.photo:
            # Photo: download and re-upload once, reuse the handle per channel.
            file = await borg.download_file(previous_message.media)
            uploaded_doc = await borg.upload_file(file, file_name="img.png")
            raw_text = previous_message.text
            for channel in channels:
                try:
                    await borg.send_file(
                        int(channel.chat_id),
                        InputMediaUploadedPhoto(file=uploaded_doc),
                        force_document=False,
                        caption=raw_text,
                        link_preview=False
                    )
                    sent_count += 1
                    await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}")
                except Exception as error:
                    # BUG FIX: the original referenced an undefined name
                    # `chat_id` in this handler (NameError inside except).
                    if await _report_send_error(event, channel.chat_id, error):
                        return
                    error_count += 1
                    await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}")
            await event.edit(f"{sent_count} messages sent with {error_count} errors.")
            if error_count > 0:
                try:
                    await borg.send_message(logs_id, f"{error_count} Errors")
                except Exception:
                    pass
        else:
            # Plain text message.
            raw_text = previous_message.text
            for channel in channels:
                try:
                    await borg.send_message(int(channel.chat_id), raw_text, link_preview=False)
                    sent_count += 1
                    await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}")
                except Exception as error:
                    if await _report_send_error(event, channel.chat_id, error):
                        return
                    error_count += 1
                    await event.edit(f"Sent : {sent_count}\nError : {error_count}\nTotal : {len(channels)}")
            await event.edit(f"{sent_count} messages sent with {error_count} errors.")
            if error_count > 0:
                try:
                    await borg.send_message(logs_id, f"{error_count} Errors")
                except Exception:
                    await event.edit("Set up log channel for checking errors.")


# Written by @HeisenbergTheDanger
@borg.on(admin_cmd("badd ?(.*)", allow_sudo=True))
async def add_ch(event):
    """Add channels to the database: either parse a replied .listchannels dump,
    or add the current chat."""
    if event.fwd_from:
        return
    if event.reply_to_msg_id:
        await event.edit("Adding...")
        previous_message = await event.get_reply_message()
        raw_text = previous_message.text
        lines = raw_text.split("\n")
        # Lines 1 .. len-3 of a .listchannels dump look like "=> `<chat_id>`";
        # strip the 4-char prefix and the trailing backtick.
        for line_number in range(1, len(lines) - 2):
            channel_id = lines[line_number][4:-1]
            if not in_channels(channel_id):
                add_channel(channel_id)
        await event.edit("Channels added!")
        await asyncio.sleep(3)
        await event.delete()
        return
    chat_id = event.chat_id
    try:
        # Never add the log group itself to the broadcast list.
        if int(chat_id) == logs_id:
            return
    except Exception:
        pass
    if not in_channels(chat_id):
        add_channel(chat_id)
        await event.edit("`Added Successfuly To List`")
    else:
        await event.edit("`Channel is already is database!`")
    await asyncio.sleep(3)
    await event.delete()


@borg.on(admin_cmd("brm ?(.*)", allow_sudo=True))
async def remove_ch(event):
    """Remove a given channel id, the current chat, or (with "brm all") everything."""
    if event.fwd_from:
        return
    chat_id = event.pattern_match.group(1)
    if chat_id == "all":
        await event.edit("Removing...")
        for channel in get_all_channels():
            rm_channel(channel.chat_id)
        await event.edit("Database cleared.")
        return
    if in_channels(chat_id):
        rm_channel(chat_id)
        await event.edit("Removed from database")
    elif in_channels(event.chat_id):
        rm_channel(event.chat_id)
        await event.edit("Removed from database")
    else:
        await event.edit("Channel is already removed from database. ")
    await asyncio.sleep(3)
    await event.delete()


@borg.on(admin_cmd("listchannels", allow_sudo=True))
async def list_channels(event):
    """List every chat id in the database, as a file when the text is too long.

    (Renamed from `list` to stop shadowing the builtin.)
    """
    if event.fwd_from:
        return
    channels = get_all_channels()
    msg = "Channels in database:\n"
    for channel in channels:
        msg += f"=> `{channel.chat_id}`\n"
    msg += f"\nTotal {len(channels)} channels."
    # NOTE(review): `Config` is not imported in this module; presumably it is
    # injected by the userbot framework at runtime - confirm its source (or
    # replace MAX_MESSAGE_SIZE_LIMIT with a literal such as 4096).
    if len(msg) > Config.MAX_MESSAGE_SIZE_LIMIT:
        with io.BytesIO(str.encode(msg)) as out_file:
            out_file.name = "channels.text"
            await borg.send_file(
                event.chat_id,
                out_file,
                force_document=True,
                allow_cache=False,
                caption="Channels in database",
                reply_to=event
            )
        await event.delete()
    else:
        await event.edit(msg)
userbot/plugins/broadcast.py
8,670
Fully Written by @HeisenbergTheDanger (Keep credits else gay) Permission Seeked By @StarkXD - Approved Written by @HeisenbergTheDanger Written by @HeisenbergTheDanger
166
en
0.918315
import pytest

import sinergym.utils.rewards as R

# Each case: (power, temperatures, month, day,
#             expected total reward, expected energy term, expected comfort term)
CASES = [
    # Input 1
    (
        186.5929171535975,
        [22.16742570092868],
        3,
        31,
        -0.009329645857679876,
        -0.018659291715359752,
        -0.0,
    ),
    # Input 2
    (
        688.0477550424935,
        [26.7881162590194],
        3,
        30,
        -1.6784605172618248,
        -0.06880477550424935,
        -3.2881162590194,
    ),
    # Input 3
    (
        23168.30752221127,
        [20.37505026953311],
        2,
        25,
        -1.1584153761105636,
        -2.316830752221127,
        -0.0,
    ),
]


@pytest.mark.parametrize(
    'power,temperatures,month,day,reward,reward_energy,reward_comfort',
    CASES,
)
def test_calculate(
        simple_reward,
        power,
        temperatures,
        month,
        day,
        reward,
        reward_energy,
        reward_comfort):
    """The reward and its energy/comfort terms match the precomputed values."""
    total, terms = simple_reward.calculate(power, temperatures, month, day)
    assert total == reward
    assert terms['reward_energy'] == reward_energy
    assert terms['reward_comfort'] == reward_comfort
tests/test_reward.py
1,215
Input 1 Input 2 Input 3
23
my
0.064841
from main import main from src.common import fill_with class Arguments: pass # latex info result_structure = '|c|c|c|c|c|c|' test_structure = '|c|c|c|c|c|c|c|' table_body = ''' \\begin{center} \t\\begin{tabular}{%s} \t\t\\hline \\rowcolor{brown!50} %s \t\\end{tabular} \\end{center}\n ''' # Util data for build the result tables bots = ['reactive', 'proactive'] test_data_order = ['DIRTY-MEAN', 'CLEAN', 'TLE', 'FIRED'] result_headers = '\t\tTest Id & Tipo de robot & Suciedad media & Ambiente limpio & Tiempo terminado & Despedido \\\\ ' # Util data for build the test table prop = ['rows', 'columns', 'babies', 'time', 'toys', 'dirty'] test_headers = "\t\tTest Id & Filas & Columnas & Beb\\'es & Tiempo(t) & Obstaculos & Suciedad \\\\ \hline" test = [ # n, m, b, t, o, d [10, 10, 6, 2, 20, 40], [10, 10, 6, 3, 20, 40], [10, 10, 6, 4, 20, 40], [10, 10, 6, 5, 20, 40], [10, 10, 6, 10, 20, 40], [10, 10, 6, 15, 20, 40], [ 7, 8, 7, 5, 10, 10], [ 7, 8, 7, 5, 14, 30], [20, 20, 16, 15, 0, 45], [20, 20, 16, 15, 30, 0], [20, 20, 5, 15, 0, 0], [ 7, 8, 6, 2, 20, 40], ] def build_args(): args = Arguments() args.env = 'house' args.level = 'notset' args.log_file = '' args.cicles = 100 args.repetitions = 100 args.verbose = False return args def build_test_row(data, id): test_name = '\\multirow{%d}{*}{t%d}' % (len(data), id) empty = ' ' * len(test_name) row = ' & %s' * 5 callback = lambda: empty test_gen = fill_with([test_name], callback) table = [] for (name, d), head in zip(data, test_gen): current_row = row % (name, *[str(d[key]) for key in test_data_order]) table.append(f'\t\t{head}{current_row} \\\\ ') return '\\cline{2-6}\n'.join(table) + '\\hline ' def build_test_table(data): table = [test_headers] for i, t in enumerate(data): row = ' & '.join([str(v) for v in [f't{i}', *t]]) table.append(f'\t\t{row} \\\\ \\hline') return '\n'.join(table) def build_result_table(data): args = build_args() table = [result_headers] for i, t in enumerate(data): for attr in zip(prop, t): setattr(args, 
*attr) data = [] for bot in bots: args.robot = bot data.append((bot, main(args))) table.append(build_test_row(data, i)) return '\\hline\n'.join(table) if __name__ == "__main__": print(table_body % (test_structure, build_test_table(test))) print(table_body % (result_structure, build_result_table(test)))
test.py
2,679
latex info Util data for build the result tables Util data for build the test table n, m, b, t, o, d
105
en
0.281392
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py import paddle from paddle import ParamAttr import paddle.nn as nn import paddle.nn.functional as F from paddle.nn.initializer import Normal, Constant from ppdet.core.workspace import register from ppdet.modeling import ops from ppdet.modeling import bbox_utils from ppdet.modeling.proposal_generator.target_layer import RBoxAssigner import numpy as np class S2ANetAnchorGenerator(nn.Layer): """ AnchorGenerator by paddle """ def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None): super(S2ANetAnchorGenerator, self).__init__() self.base_size = base_size self.scales = paddle.to_tensor(scales) self.ratios = paddle.to_tensor(ratios) self.scale_major = scale_major self.ctr = ctr self.base_anchors = self.gen_base_anchors() @property def num_base_anchors(self): return self.base_anchors.shape[0] def gen_base_anchors(self): w = self.base_size h = self.base_size if self.ctr is None: x_ctr = 0.5 * (w - 1) y_ctr = 0.5 * (h - 1) else: x_ctr, y_ctr = self.ctr h_ratios = paddle.sqrt(self.ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:] * self.scales[:]).reshape([-1]) hs = (h * h_ratios[:] * self.scales[:]).reshape([-1]) else: ws = (w * self.scales[:] * w_ratios[:]).reshape([-1]) hs = (h * self.scales[:] * 
h_ratios[:]).reshape([-1]) base_anchors = paddle.stack( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) ], axis=-1) base_anchors = paddle.round(base_anchors) return base_anchors def _meshgrid(self, x, y, row_major=True): yy, xx = paddle.meshgrid(y, x) yy = yy.reshape([-1]) xx = xx.reshape([-1]) if row_major: return xx, yy else: return yy, xx def forward(self, featmap_size, stride=16): # featmap_size*stride project it to original area feat_h = featmap_size[0] feat_w = featmap_size[1] shift_x = paddle.arange(0, feat_w, 1, 'int32') * stride shift_y = paddle.arange(0, feat_h, 1, 'int32') * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = paddle.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1) all_anchors = self.base_anchors[:, :] + shifts[:, :] all_anchors = all_anchors.reshape([feat_h * feat_w, 4]) return all_anchors def valid_flags(self, featmap_size, valid_size): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = paddle.zeros([feat_w], dtype='int32') valid_y = paddle.zeros([feat_h], dtype='int32') valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = paddle.reshape(valid, [-1, 1]) valid = paddle.expand(valid, [-1, self.num_base_anchors]).reshape([-1]) return valid class AlignConv(nn.Layer): def __init__(self, in_channels, out_channels, kernel_size=3, groups=1): super(AlignConv, self).__init__() self.kernel_size = kernel_size self.align_conv = paddle.vision.ops.DeformConv2D( in_channels, out_channels, kernel_size=self.kernel_size, padding=(self.kernel_size - 1) // 2, groups=groups, weight_attr=ParamAttr(initializer=Normal(0, 0.01)), bias_attr=None) @paddle.no_grad() def get_offset(self, anchors, featmap_size, stride): """ Args: anchors: [M,5] xc,yc,w,h,angle featmap_size: (feat_h, feat_w) stride: 8 Returns: """ anchors = paddle.reshape(anchors, 
[-1, 5]) # (NA,5) dtype = anchors.dtype feat_h = featmap_size[0] feat_w = featmap_size[1] pad = (self.kernel_size - 1) // 2 idx = paddle.arange(-pad, pad + 1, dtype=dtype) yy, xx = paddle.meshgrid(idx, idx) xx = paddle.reshape(xx, [-1]) yy = paddle.reshape(yy, [-1]) # get sampling locations of default conv xc = paddle.arange(0, feat_w, dtype=dtype) yc = paddle.arange(0, feat_h, dtype=dtype) yc, xc = paddle.meshgrid(yc, xc) xc = paddle.reshape(xc, [-1, 1]) yc = paddle.reshape(yc, [-1, 1]) x_conv = xc + xx y_conv = yc + yy # get sampling locations of anchors # x_ctr, y_ctr, w, h, a = np.unbind(anchors, dim=1) x_ctr = anchors[:, 0] y_ctr = anchors[:, 1] w = anchors[:, 2] h = anchors[:, 3] a = anchors[:, 4] x_ctr = paddle.reshape(x_ctr, [-1, 1]) y_ctr = paddle.reshape(y_ctr, [-1, 1]) w = paddle.reshape(w, [-1, 1]) h = paddle.reshape(h, [-1, 1]) a = paddle.reshape(a, [-1, 1]) x_ctr = x_ctr / stride y_ctr = y_ctr / stride w_s = w / stride h_s = h / stride cos, sin = paddle.cos(a), paddle.sin(a) dw, dh = w_s / self.kernel_size, h_s / self.kernel_size x, y = dw * xx, dh * yy xr = cos * x - sin * y yr = sin * x + cos * y x_anchor, y_anchor = xr + x_ctr, yr + y_ctr # get offset filed offset_x = x_anchor - x_conv offset_y = y_anchor - y_conv offset = paddle.stack([offset_y, offset_x], axis=-1) offset = paddle.reshape( offset, [feat_h * feat_w, self.kernel_size * self.kernel_size * 2]) offset = paddle.transpose(offset, [1, 0]) offset = paddle.reshape( offset, [1, self.kernel_size * self.kernel_size * 2, feat_h, feat_w]) return offset def forward(self, x, refine_anchors, featmap_size, stride): offset = self.get_offset(refine_anchors, featmap_size, stride) x = F.relu(self.align_conv(x, offset)) return x @register class S2ANetHead(nn.Layer): """ S2Anet head Args: stacked_convs (int): number of stacked_convs feat_in (int): input channels of feat feat_out (int): output channels of feat num_classes (int): num_classes anchor_strides (list): stride of anchors anchor_scales (list): 
scale of anchors anchor_ratios (list): ratios of anchors target_means (list): target_means target_stds (list): target_stds align_conv_type (str): align_conv_type ['Conv', 'AlignConv'] align_conv_size (int): kernel size of align_conv use_sigmoid_cls (bool): use sigmoid_cls or not reg_loss_weight (list): loss weight for regression """ __shared__ = ['num_classes'] __inject__ = ['anchor_assign'] def __init__(self, stacked_convs=2, feat_in=256, feat_out=256, num_classes=15, anchor_strides=[8, 16, 32, 64, 128], anchor_scales=[4], anchor_ratios=[1.0], target_means=0.0, target_stds=1.0, align_conv_type='AlignConv', align_conv_size=3, use_sigmoid_cls=True, anchor_assign=RBoxAssigner().__dict__, reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1], cls_loss_weight=[1.1, 1.05], reg_loss_type='l1'): super(S2ANetHead, self).__init__() self.stacked_convs = stacked_convs self.feat_in = feat_in self.feat_out = feat_out self.anchor_list = None self.anchor_scales = anchor_scales self.anchor_ratios = anchor_ratios self.anchor_strides = anchor_strides self.anchor_strides = paddle.to_tensor(anchor_strides) self.anchor_base_sizes = list(anchor_strides) self.means = paddle.ones(shape=[5]) * target_means self.stds = paddle.ones(shape=[5]) * target_stds assert align_conv_type in ['AlignConv', 'Conv', 'DCN'] self.align_conv_type = align_conv_type self.align_conv_size = align_conv_size self.use_sigmoid_cls = use_sigmoid_cls self.cls_out_channels = num_classes if self.use_sigmoid_cls else 1 self.sampling = False self.anchor_assign = anchor_assign self.reg_loss_weight = reg_loss_weight self.cls_loss_weight = cls_loss_weight self.alpha = 1.0 self.beta = 1.0 self.reg_loss_type = reg_loss_type self.s2anet_head_out = None # anchor self.anchor_generators = [] for anchor_base in self.anchor_base_sizes: self.anchor_generators.append( S2ANetAnchorGenerator(anchor_base, anchor_scales, anchor_ratios)) self.anchor_generators = nn.LayerList(self.anchor_generators) self.fam_cls_convs = nn.Sequential() 
self.fam_reg_convs = nn.Sequential() for i in range(self.stacked_convs): chan_in = self.feat_in if i == 0 else self.feat_out self.fam_cls_convs.add_sublayer( 'fam_cls_conv_{}'.format(i), nn.Conv2D( in_channels=chan_in, out_channels=self.feat_out, kernel_size=3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.fam_cls_convs.add_sublayer('fam_cls_conv_{}_act'.format(i), nn.ReLU()) self.fam_reg_convs.add_sublayer( 'fam_reg_conv_{}'.format(i), nn.Conv2D( in_channels=chan_in, out_channels=self.feat_out, kernel_size=3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.fam_reg_convs.add_sublayer('fam_reg_conv_{}_act'.format(i), nn.ReLU()) self.fam_reg = nn.Conv2D( self.feat_out, 5, 1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) prior_prob = 0.01 bias_init = float(-np.log((1 - prior_prob) / prior_prob)) self.fam_cls = nn.Conv2D( self.feat_out, self.cls_out_channels, 1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(bias_init))) if self.align_conv_type == "AlignConv": self.align_conv = AlignConv(self.feat_out, self.feat_out, self.align_conv_size) elif self.align_conv_type == "Conv": self.align_conv = nn.Conv2D( self.feat_out, self.feat_out, self.align_conv_size, padding=(self.align_conv_size - 1) // 2, bias_attr=ParamAttr(initializer=Constant(0))) elif self.align_conv_type == "DCN": self.align_conv_offset = nn.Conv2D( self.feat_out, 2 * self.align_conv_size**2, 1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) self.align_conv = paddle.vision.ops.DeformConv2D( self.feat_out, self.feat_out, self.align_conv_size, padding=(self.align_conv_size - 1) // 2, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=False) self.or_conv = nn.Conv2D( self.feat_out, self.feat_out, 
kernel_size=3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) # ODM self.odm_cls_convs = nn.Sequential() self.odm_reg_convs = nn.Sequential() for i in range(self.stacked_convs): ch_in = self.feat_out # ch_in = int(self.feat_out / 8) if i == 0 else self.feat_out self.odm_cls_convs.add_sublayer( 'odm_cls_conv_{}'.format(i), nn.Conv2D( in_channels=ch_in, out_channels=self.feat_out, kernel_size=3, stride=1, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.odm_cls_convs.add_sublayer('odm_cls_conv_{}_act'.format(i), nn.ReLU()) self.odm_reg_convs.add_sublayer( 'odm_reg_conv_{}'.format(i), nn.Conv2D( in_channels=self.feat_out, out_channels=self.feat_out, kernel_size=3, stride=1, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0)))) self.odm_reg_convs.add_sublayer('odm_reg_conv_{}_act'.format(i), nn.ReLU()) self.odm_cls = nn.Conv2D( self.feat_out, self.cls_out_channels, 3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(bias_init))) self.odm_reg = nn.Conv2D( self.feat_out, 5, 3, padding=1, weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)), bias_attr=ParamAttr(initializer=Constant(0))) self.featmap_sizes = [] self.base_anchors_list = [] self.refine_anchor_list = [] def forward(self, feats): fam_reg_branch_list = [] fam_cls_branch_list = [] odm_reg_branch_list = [] odm_cls_branch_list = [] self.featmap_sizes_list = [] self.base_anchors_list = [] self.refine_anchor_list = [] for feat_idx in range(len(feats)): feat = feats[feat_idx] fam_cls_feat = self.fam_cls_convs(feat) fam_cls = self.fam_cls(fam_cls_feat) # [N, CLS, H, W] --> [N, H, W, CLS] fam_cls = fam_cls.transpose([0, 2, 3, 1]) fam_cls_reshape = paddle.reshape( fam_cls, [fam_cls.shape[0], -1, self.cls_out_channels]) fam_cls_branch_list.append(fam_cls_reshape) fam_reg_feat = 
self.fam_reg_convs(feat) fam_reg = self.fam_reg(fam_reg_feat) # [N, 5, H, W] --> [N, H, W, 5] fam_reg = fam_reg.transpose([0, 2, 3, 1]) fam_reg_reshape = paddle.reshape(fam_reg, [fam_reg.shape[0], -1, 5]) fam_reg_branch_list.append(fam_reg_reshape) # prepare anchor featmap_size = (paddle.shape(feat)[2], paddle.shape(feat)[3]) self.featmap_sizes_list.append(featmap_size) init_anchors = self.anchor_generators[feat_idx]( featmap_size, self.anchor_strides[feat_idx]) init_anchors = paddle.to_tensor(init_anchors, dtype='float32') NA = featmap_size[0] * featmap_size[1] init_anchors = paddle.reshape(init_anchors, [NA, 4]) init_anchors = self.rect2rbox(init_anchors) self.base_anchors_list.append(init_anchors) if self.training: refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors) else: refine_anchor = self.bbox_decode(fam_reg, init_anchors) self.refine_anchor_list.append(refine_anchor) if self.align_conv_type == 'AlignConv': align_feat = self.align_conv(feat, refine_anchor.clone(), featmap_size, self.anchor_strides[feat_idx]) elif self.align_conv_type == 'DCN': align_offset = self.align_conv_offset(feat) align_feat = self.align_conv(feat, align_offset) elif self.align_conv_type == 'Conv': align_feat = self.align_conv(feat) or_feat = self.or_conv(align_feat) odm_reg_feat = or_feat odm_cls_feat = or_feat odm_reg_feat = self.odm_reg_convs(odm_reg_feat) odm_cls_feat = self.odm_cls_convs(odm_cls_feat) odm_cls_score = self.odm_cls(odm_cls_feat) # [N, CLS, H, W] --> [N, H, W, CLS] odm_cls_score = odm_cls_score.transpose([0, 2, 3, 1]) odm_cls_score_shape = odm_cls_score.shape odm_cls_score_reshape = paddle.reshape(odm_cls_score, [ odm_cls_score_shape[0], odm_cls_score_shape[1] * odm_cls_score_shape[2], self.cls_out_channels ]) odm_cls_branch_list.append(odm_cls_score_reshape) odm_bbox_pred = self.odm_reg(odm_reg_feat) # [N, 5, H, W] --> [N, H, W, 5] odm_bbox_pred = odm_bbox_pred.transpose([0, 2, 3, 1]) odm_bbox_pred_reshape = paddle.reshape(odm_bbox_pred, [-1, 5]) 
odm_bbox_pred_reshape = paddle.unsqueeze( odm_bbox_pred_reshape, axis=0) odm_reg_branch_list.append(odm_bbox_pred_reshape) self.s2anet_head_out = (fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list) return self.s2anet_head_out def get_prediction(self, nms_pre=2000): refine_anchors = self.refine_anchor_list fam_cls_branch_list = self.s2anet_head_out[0] fam_reg_branch_list = self.s2anet_head_out[1] odm_cls_branch_list = self.s2anet_head_out[2] odm_reg_branch_list = self.s2anet_head_out[3] pred_scores, pred_bboxes = self.get_bboxes( odm_cls_branch_list, odm_reg_branch_list, refine_anchors, nms_pre, self.cls_out_channels, self.use_sigmoid_cls) return pred_scores, pred_bboxes def smooth_l1_loss(self, pred, label, delta=1.0 / 9.0): """ Args: pred: pred score label: label delta: delta Returns: loss """ assert pred.shape == label.shape and label.numel() > 0 assert delta > 0 diff = paddle.abs(pred - label) loss = paddle.where(diff < delta, 0.5 * diff * diff / delta, diff - 0.5 * delta) return loss def get_fam_loss(self, fam_target, s2anet_head_out, reg_loss_type='gwd'): (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes, pos_inds, neg_inds) = fam_target fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out fam_cls_losses = [] fam_bbox_losses = [] st_idx = 0 num_total_samples = len(pos_inds) + len( neg_inds) if self.sampling else len(pos_inds) num_total_samples = max(1, num_total_samples) for idx, feat_size in enumerate(self.featmap_sizes_list): feat_anchor_num = feat_size[0] * feat_size[1] # step1: get data feat_labels = labels[st_idx:st_idx + feat_anchor_num] feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num] feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :] feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :] # step2: calc cls loss feat_labels = feat_labels.reshape(-1) feat_label_weights = feat_label_weights.reshape(-1) 
fam_cls_score = fam_cls_branch_list[idx] fam_cls_score = paddle.squeeze(fam_cls_score, axis=0) fam_cls_score1 = fam_cls_score feat_labels = paddle.to_tensor(feat_labels) feat_labels_one_hot = paddle.nn.functional.one_hot( feat_labels, self.cls_out_channels + 1) feat_labels_one_hot = feat_labels_one_hot[:, 1:] feat_labels_one_hot.stop_gradient = True num_total_samples = paddle.to_tensor( num_total_samples, dtype='float32', stop_gradient=True) fam_cls = F.sigmoid_focal_loss( fam_cls_score1, feat_labels_one_hot, normalizer=num_total_samples, reduction='none') feat_label_weights = feat_label_weights.reshape( feat_label_weights.shape[0], 1) feat_label_weights = np.repeat( feat_label_weights, self.cls_out_channels, axis=1) feat_label_weights = paddle.to_tensor( feat_label_weights, stop_gradient=True) fam_cls = fam_cls * feat_label_weights fam_cls_total = paddle.sum(fam_cls) fam_cls_losses.append(fam_cls_total) # step3: regression loss feat_bbox_targets = paddle.to_tensor( feat_bbox_targets, dtype='float32', stop_gradient=True) feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5]) fam_bbox_pred = fam_reg_branch_list[idx] fam_bbox_pred = paddle.squeeze(fam_bbox_pred, axis=0) fam_bbox_pred = paddle.reshape(fam_bbox_pred, [-1, 5]) fam_bbox = self.smooth_l1_loss(fam_bbox_pred, feat_bbox_targets) loss_weight = paddle.to_tensor( self.reg_loss_weight, dtype='float32', stop_gradient=True) fam_bbox = paddle.multiply(fam_bbox, loss_weight) feat_bbox_weights = paddle.to_tensor( feat_bbox_weights, stop_gradient=True) if reg_loss_type == 'l1': fam_bbox = fam_bbox * feat_bbox_weights fam_bbox_total = paddle.sum(fam_bbox) / num_total_samples elif reg_loss_type == 'iou' or reg_loss_type == 'gwd': fam_bbox = paddle.sum(fam_bbox, axis=-1) feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1) try: from rbox_iou_ops import rbox_iou except Exception as e: print("import custom_ops error, try install rbox_iou_ops " \ "following ppdet/ext_op/README.md", e) sys.stdout.flush() 
sys.exit(-1) # calc iou fam_bbox_decode = self.delta2rbox(self.base_anchors_list[idx], fam_bbox_pred) bbox_gt_bboxes = paddle.to_tensor( bbox_gt_bboxes, dtype=fam_bbox_decode.dtype, place=fam_bbox_decode.place) bbox_gt_bboxes.stop_gradient = True iou = rbox_iou(fam_bbox_decode, bbox_gt_bboxes) iou = paddle.diag(iou) if reg_loss_type == 'gwd': bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx + feat_anchor_num, :] fam_bbox_total = self.gwd_loss(fam_bbox_decode, bbox_gt_bboxes_level) fam_bbox_total = fam_bbox_total * feat_bbox_weights fam_bbox_total = paddle.sum( fam_bbox_total) / num_total_samples fam_bbox_losses.append(fam_bbox_total) st_idx += feat_anchor_num fam_cls_loss = paddle.add_n(fam_cls_losses) fam_cls_loss_weight = paddle.to_tensor( self.cls_loss_weight[0], dtype='float32', stop_gradient=True) fam_cls_loss = fam_cls_loss * fam_cls_loss_weight fam_reg_loss = paddle.add_n(fam_bbox_losses) return fam_cls_loss, fam_reg_loss def get_odm_loss(self, odm_target, s2anet_head_out, reg_loss_type='gwd'): (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes, pos_inds, neg_inds) = odm_target fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out odm_cls_losses = [] odm_bbox_losses = [] st_idx = 0 num_total_samples = len(pos_inds) + len( neg_inds) if self.sampling else len(pos_inds) num_total_samples = max(1, num_total_samples) for idx, feat_size in enumerate(self.featmap_sizes_list): feat_anchor_num = feat_size[0] * feat_size[1] # step1: get data feat_labels = labels[st_idx:st_idx + feat_anchor_num] feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num] feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :] feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :] # step2: calc cls loss feat_labels = feat_labels.reshape(-1) feat_label_weights = feat_label_weights.reshape(-1) odm_cls_score = odm_cls_branch_list[idx] odm_cls_score = paddle.squeeze(odm_cls_score, axis=0) 
odm_cls_score1 = odm_cls_score feat_labels = paddle.to_tensor(feat_labels) feat_labels_one_hot = paddle.nn.functional.one_hot( feat_labels, self.cls_out_channels + 1) feat_labels_one_hot = feat_labels_one_hot[:, 1:] feat_labels_one_hot.stop_gradient = True num_total_samples = paddle.to_tensor( num_total_samples, dtype='float32', stop_gradient=True) odm_cls = F.sigmoid_focal_loss( odm_cls_score1, feat_labels_one_hot, normalizer=num_total_samples, reduction='none') feat_label_weights = feat_label_weights.reshape( feat_label_weights.shape[0], 1) feat_label_weights = np.repeat( feat_label_weights, self.cls_out_channels, axis=1) feat_label_weights = paddle.to_tensor(feat_label_weights) feat_label_weights.stop_gradient = True odm_cls = odm_cls * feat_label_weights odm_cls_total = paddle.sum(odm_cls) odm_cls_losses.append(odm_cls_total) # # step3: regression loss feat_bbox_targets = paddle.to_tensor( feat_bbox_targets, dtype='float32') feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5]) feat_bbox_targets.stop_gradient = True odm_bbox_pred = odm_reg_branch_list[idx] odm_bbox_pred = paddle.squeeze(odm_bbox_pred, axis=0) odm_bbox_pred = paddle.reshape(odm_bbox_pred, [-1, 5]) odm_bbox = self.smooth_l1_loss(odm_bbox_pred, feat_bbox_targets) loss_weight = paddle.to_tensor( self.reg_loss_weight, dtype='float32', stop_gradient=True) odm_bbox = paddle.multiply(odm_bbox, loss_weight) feat_bbox_weights = paddle.to_tensor( feat_bbox_weights, stop_gradient=True) if reg_loss_type == 'l1': odm_bbox = odm_bbox * feat_bbox_weights odm_bbox_total = paddle.sum(odm_bbox) / num_total_samples elif reg_loss_type == 'iou' or reg_loss_type == 'gwd': odm_bbox = paddle.sum(odm_bbox, axis=-1) feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1) try: from rbox_iou_ops import rbox_iou except Exception as e: print("import custom_ops error, try install rbox_iou_ops " \ "following ppdet/ext_op/README.md", e) sys.stdout.flush() sys.exit(-1) # calc iou odm_bbox_decode = 
self.delta2rbox(self.refine_anchor_list[idx], odm_bbox_pred) bbox_gt_bboxes = paddle.to_tensor( bbox_gt_bboxes, dtype=odm_bbox_decode.dtype, place=odm_bbox_decode.place) bbox_gt_bboxes.stop_gradient = True iou = rbox_iou(odm_bbox_decode, bbox_gt_bboxes) iou = paddle.diag(iou) if reg_loss_type == 'gwd': bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx + feat_anchor_num, :] odm_bbox_total = self.gwd_loss(odm_bbox_decode, bbox_gt_bboxes_level) odm_bbox_total = odm_bbox_total * feat_bbox_weights odm_bbox_total = paddle.sum( odm_bbox_total) / num_total_samples odm_bbox_losses.append(odm_bbox_total) st_idx += feat_anchor_num odm_cls_loss = paddle.add_n(odm_cls_losses) odm_cls_loss_weight = paddle.to_tensor( self.cls_loss_weight[1], dtype='float32', stop_gradient=True) odm_cls_loss = odm_cls_loss * odm_cls_loss_weight odm_reg_loss = paddle.add_n(odm_bbox_losses) return odm_cls_loss, odm_reg_loss def get_loss(self, inputs): # inputs: im_id image im_shape scale_factor gt_bbox gt_class is_crowd # compute loss fam_cls_loss_lst = [] fam_reg_loss_lst = [] odm_cls_loss_lst = [] odm_reg_loss_lst = [] im_shape = inputs['im_shape'] for im_id in range(im_shape.shape[0]): np_im_shape = inputs['im_shape'][im_id].numpy() np_scale_factor = inputs['scale_factor'][im_id].numpy() # data_format: (xc, yc, w, h, theta) gt_bboxes = inputs['gt_rbox'][im_id].numpy() gt_labels = inputs['gt_class'][im_id].numpy() is_crowd = inputs['is_crowd'][im_id].numpy() gt_labels = gt_labels + 1 # featmap_sizes anchors_list_all = np.concatenate(self.base_anchors_list) # get im_feat fam_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[0]] fam_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[1]] odm_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[2]] odm_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[3]] im_s2anet_head_out = (fam_cls_feats_list, fam_reg_feats_list, odm_cls_feats_list, odm_reg_feats_list) # FAM im_fam_target = self.anchor_assign(anchors_list_all, gt_bboxes, 
gt_labels, is_crowd) if im_fam_target is not None: im_fam_cls_loss, im_fam_reg_loss = self.get_fam_loss( im_fam_target, im_s2anet_head_out, self.reg_loss_type) fam_cls_loss_lst.append(im_fam_cls_loss) fam_reg_loss_lst.append(im_fam_reg_loss) # ODM np_refine_anchors_list = paddle.concat( self.refine_anchor_list).numpy() np_refine_anchors_list = np.concatenate(np_refine_anchors_list) np_refine_anchors_list = np_refine_anchors_list.reshape(-1, 5) im_odm_target = self.anchor_assign(np_refine_anchors_list, gt_bboxes, gt_labels, is_crowd) if im_odm_target is not None: im_odm_cls_loss, im_odm_reg_loss = self.get_odm_loss( im_odm_target, im_s2anet_head_out, self.reg_loss_type) odm_cls_loss_lst.append(im_odm_cls_loss) odm_reg_loss_lst.append(im_odm_reg_loss) fam_cls_loss = paddle.add_n(fam_cls_loss_lst) fam_reg_loss = paddle.add_n(fam_reg_loss_lst) odm_cls_loss = paddle.add_n(odm_cls_loss_lst) odm_reg_loss = paddle.add_n(odm_reg_loss_lst) return { 'fam_cls_loss': fam_cls_loss, 'fam_reg_loss': fam_reg_loss, 'odm_cls_loss': odm_cls_loss, 'odm_reg_loss': odm_reg_loss } def get_bboxes(self, cls_score_list, bbox_pred_list, mlvl_anchors, nms_pre, cls_out_channels, use_sigmoid_cls): assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] idx = 0 for cls_score, bbox_pred, anchors in zip(cls_score_list, bbox_pred_list, mlvl_anchors): cls_score = paddle.reshape(cls_score, [-1, cls_out_channels]) if use_sigmoid_cls: scores = F.sigmoid(cls_score) else: scores = F.softmax(cls_score, axis=-1) # bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 5) bbox_pred = paddle.transpose(bbox_pred, [1, 2, 0]) bbox_pred = paddle.reshape(bbox_pred, [-1, 5]) anchors = paddle.reshape(anchors, [-1, 5]) if scores.shape[0] > nms_pre: # Get maximum scores for foreground classes. 
if use_sigmoid_cls: max_scores = paddle.max(scores, axis=1) else: max_scores = paddle.max(scores[:, 1:], axis=1) topk_val, topk_inds = paddle.topk(max_scores, nms_pre) anchors = paddle.gather(anchors, topk_inds) bbox_pred = paddle.gather(bbox_pred, topk_inds) scores = paddle.gather(scores, topk_inds) bbox_delta = paddle.reshape(bbox_pred, [-1, 5]) bboxes = self.delta2rbox(anchors, bbox_delta) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) idx += 1 mlvl_bboxes = paddle.concat(mlvl_bboxes, axis=0) mlvl_scores = paddle.concat(mlvl_scores) return mlvl_scores, mlvl_bboxes def rect2rbox(self, bboxes): """ :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax) :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle) """ bboxes = paddle.reshape(bboxes, [-1, 4]) num_boxes = paddle.shape(bboxes)[0] x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0 y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0 edges1 = paddle.abs(bboxes[:, 2] - bboxes[:, 0]) edges2 = paddle.abs(bboxes[:, 3] - bboxes[:, 1]) rbox_w = paddle.maximum(edges1, edges2) rbox_h = paddle.minimum(edges1, edges2) # set angle inds = edges1 < edges2 inds = paddle.cast(inds, 'int32') rboxes_angle = inds * np.pi / 2.0 rboxes = paddle.stack( (x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1) return rboxes # deltas to rbox def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-6): """ :param rrois: (cx, cy, w, h, theta) :param deltas: (dx, dy, dw, dh, dtheta) :param means: means of anchor :param stds: stds of anchor :param wh_ratio_clip: clip threshold of wh_ratio :return: """ deltas = paddle.reshape(deltas, [-1, 5]) rrois = paddle.reshape(rrois, [-1, 5]) # fix dy2st bug denorm_deltas = deltas * self.stds + self.means denorm_deltas = paddle.add( paddle.multiply(deltas, self.stds), self.means) dx = denorm_deltas[:, 0] dy = denorm_deltas[:, 1] dw = denorm_deltas[:, 2] dh = denorm_deltas[:, 3] dangle = denorm_deltas[:, 4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = paddle.clip(dw, min=-max_ratio, max=max_ratio) dh = 
paddle.clip(dh, min=-max_ratio, max=max_ratio) rroi_x = rrois[:, 0] rroi_y = rrois[:, 1] rroi_w = rrois[:, 2] rroi_h = rrois[:, 3] rroi_angle = rrois[:, 4] gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin( rroi_angle) + rroi_x gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos( rroi_angle) + rroi_y gw = rroi_w * dw.exp() gh = rroi_h * dh.exp() ga = np.pi * dangle + rroi_angle ga = (ga + np.pi / 4) % np.pi - np.pi / 4 ga = paddle.to_tensor(ga) gw = paddle.to_tensor(gw, dtype='float32') gh = paddle.to_tensor(gh, dtype='float32') bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1) return bboxes def bbox_decode(self, bbox_preds, anchors): """decode bbox from deltas Args: bbox_preds: [N,H,W,5] anchors: [H*W,5] return: bboxes: [N,H,W,5] """ num_imgs, H, W, _ = bbox_preds.shape bbox_delta = paddle.reshape(bbox_preds, [-1, 5]) bboxes = self.delta2rbox(anchors, bbox_delta) return bboxes def trace(self, A): tr = paddle.diagonal(A, axis1=-2, axis2=-1) tr = paddle.sum(tr, axis=-1) return tr def sqrt_newton_schulz_autograd(self, A, numIters): A_shape = A.shape batchSize = A_shape[0] dim = A_shape[1] normA = A * A normA = paddle.sum(normA, axis=1) normA = paddle.sum(normA, axis=1) normA = paddle.sqrt(normA) normA1 = normA.reshape([batchSize, 1, 1]) Y = paddle.divide(A, paddle.expand_as(normA1, A)) I = paddle.eye(dim, dim).reshape([1, dim, dim]) l0 = [] for i in range(batchSize): l0.append(I) I = paddle.concat(l0, axis=0) I.stop_gradient = False Z = paddle.eye(dim, dim).reshape([1, dim, dim]) l1 = [] for i in range(batchSize): l1.append(Z) Z = paddle.concat(l1, axis=0) Z.stop_gradient = False for i in range(numIters): T = 0.5 * (3.0 * I - Z.bmm(Y)) Y = Y.bmm(T) Z = T.bmm(Z) sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1]) sA = paddle.expand_as(sA, A) return sA def wasserstein_distance_sigma(sigma1, sigma2): wasserstein_distance_item2 = paddle.matmul( sigma1, sigma1) + paddle.matmul( sigma2, sigma2) - 2 * 
self.sqrt_newton_schulz_autograd( paddle.matmul( paddle.matmul(sigma1, paddle.matmul(sigma2, sigma2)), sigma1), 10) wasserstein_distance_item2 = self.trace(wasserstein_distance_item2) return wasserstein_distance_item2 def xywhr2xyrs(self, xywhr): xywhr = paddle.reshape(xywhr, [-1, 5]) xy = xywhr[:, :2] wh = paddle.clip(xywhr[:, 2:4], min=1e-7, max=1e7) r = xywhr[:, 4] cos_r = paddle.cos(r) sin_r = paddle.sin(r) R = paddle.stack( (cos_r, -sin_r, sin_r, cos_r), axis=-1).reshape([-1, 2, 2]) S = 0.5 * paddle.nn.functional.diag_embed(wh) return xy, R, S def gwd_loss(self, pred, target, fun='log', tau=1.0, alpha=1.0, normalize=False): xy_p, R_p, S_p = self.xywhr2xyrs(pred) xy_t, R_t, S_t = self.xywhr2xyrs(target) xy_distance = (xy_p - xy_t).square().sum(axis=-1) Sigma_p = R_p.matmul(S_p.square()).matmul(R_p.transpose([0, 2, 1])) Sigma_t = R_t.matmul(S_t.square()).matmul(R_t.transpose([0, 2, 1])) whr_distance = paddle.diagonal( S_p, axis1=-2, axis2=-1).square().sum(axis=-1) whr_distance = whr_distance + paddle.diagonal( S_t, axis1=-2, axis2=-1).square().sum(axis=-1) _t = Sigma_p.matmul(Sigma_t) _t_tr = paddle.diagonal(_t, axis1=-2, axis2=-1).sum(axis=-1) _t_det_sqrt = paddle.diagonal(S_p, axis1=-2, axis2=-1).prod(axis=-1) _t_det_sqrt = _t_det_sqrt * paddle.diagonal( S_t, axis1=-2, axis2=-1).prod(axis=-1) whr_distance = whr_distance + (-2) * ( (_t_tr + 2 * _t_det_sqrt).clip(0).sqrt()) distance = (xy_distance + alpha * alpha * whr_distance).clip(0) if normalize: wh_p = pred[..., 2:4].clip(min=1e-7, max=1e7) wh_t = target[..., 2:4].clip(min=1e-7, max=1e7) scale = ((wh_p.log() + wh_t.log()).sum(dim=-1) / 4).exp() distance = distance / scale if fun == 'log': distance = paddle.log1p(distance) if tau >= 1.0: return 1 - 1 / (tau + distance) return distance
ppdet/modeling/heads/s2anet_head.py
42,021
AnchorGenerator by paddle S2Anet head Args: stacked_convs (int): number of stacked_convs feat_in (int): input channels of feat feat_out (int): output channels of feat num_classes (int): num_classes anchor_strides (list): stride of anchors anchor_scales (list): scale of anchors anchor_ratios (list): ratios of anchors target_means (list): target_means target_stds (list): target_stds align_conv_type (str): align_conv_type ['Conv', 'AlignConv'] align_conv_size (int): kernel size of align_conv use_sigmoid_cls (bool): use sigmoid_cls or not reg_loss_weight (list): loss weight for regression decode bbox from deltas Args: bbox_preds: [N,H,W,5] anchors: [H*W,5] return: bboxes: [N,H,W,5] :param rrois: (cx, cy, w, h, theta) :param deltas: (dx, dy, dw, dh, dtheta) :param means: means of anchor :param stds: stds of anchor :param wh_ratio_clip: clip threshold of wh_ratio :return: Args: anchors: [M,5] xc,yc,w,h,angle featmap_size: (feat_h, feat_w) stride: 8 Returns: :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax) :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle) Args: pred: pred score label: label delta: delta Returns: loss Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py featmap_size*stride project it to original area (NA,5) get sampling locations of default conv get sampling locations of anchors x_ctr, y_ctr, w, h, a = np.unbind(anchors, dim=1) get offset filed anchor ODM ch_in = int(self.feat_out / 8) if i == 0 else self.feat_out [N, CLS, H, W] --> [N, H, W, CLS] [N, 5, H, W] --> [N, H, W, 5] prepare anchor [N, CLS, H, W] --> [N, H, W, CLS] [N, 5, H, W] --> [N, H, W, 5] step1: get data step2: calc cls loss step3: regression loss calc iou step1: get data step2: calc cls loss step3: regression loss calc iou inputs: im_id image im_shape scale_factor gt_bbox gt_class is_crowd compute loss data_format: (xc, yc, w, h, theta) featmap_sizes get im_feat FAM ODM bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 5) Get maximum scores for foreground classes. set angle deltas to rbox fix dy2st bug denorm_deltas = deltas * self.stds + self.means
2,813
en
0.595265
from http.server import HTTPServer, BaseHTTPRequestHandler from socketserver import ThreadingMixIn from .redirect import RedirectHandler import threading import ssl __all__ = ['ThreadedServer', 'SecureServer'] class ThreadedServer(ThreadingMixIn, HTTPServer): protocol_version = 'HTTP/1.1' def __init__(self, host: str, port: int, RequestHandlerClass: BaseHTTPRequestHandler, bind_and_activate: bool=True): self._serve_forever_thread = None # type: threading.Thread super().__init__((host, port), RequestHandlerClass, bind_and_activate) def serve_forever(self, poll_interval=0.5): self._serve_forever_thread = threading.Thread( target=super().serve_forever, args=(poll_interval,) ) self._serve_forever_thread.start() class SecureServer(ThreadedServer): def __init__(self, certfile: str, keyfile: str, host: str, port: int, RequestHandlerClass: BaseHTTPRequestHandler, bind_and_activate: bool = True): self._certfile = certfile self._keyfile = keyfile self._redirect = ThreadedServer(host, 80, RedirectHandler, bind_and_activate) super().__init__(host, port, RequestHandlerClass, bind_and_activate) def server_bind(self): super().server_bind() self._redirect.server_bind() self.socket = ssl.wrap_socket(self.socket, server_side=True, certfile=self._certfile, keyfile=self._keyfile, do_handshake_on_connect=False) def get_request(self): sock, addr = super().get_request() sock.do_handshake() return sock, addr def serve_forever(self, poll_interval=0.5): super().serve_forever(poll_interval) self._redirect.serve_forever(poll_interval) def shutdown(self): super().shutdown() self._redirect.shutdown()
serpent_server/server.py
2,260
type: threading.Thread
22
en
0.526765
# Generated by Django 2.1.3 on 2019-01-07 17:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0014_auto_20190107_2251'), ] operations = [ migrations.DeleteModel( name='PostPicks', ), migrations.AddField( model_name='post', name='image', field=models.ImageField(default='default.jpg', upload_to='post_pics'), ), ]
blog/migrations/0015_auto_20190107_2318.py
483
Generated by Django 2.1.3 on 2019-01-07 17:48
45
en
0.754562
# Copyright (C) 2008-today The SG++ project # This file is part of the SG++ project. For conditions of distribution and # use, please see the copyright notice provided with SG++ or at # sgpp.sparsegrids.org # This file is part of SGClass, a program package making use of spatially adaptive sparse grids to solve numerical problems # # Copyright (C) 2007 Dirk Pflueger (pflueged@in.tum.de) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with pyclass. If not, see <http://www.gnu.org/licenses/>. ## @package classifier # @ingroup bin # @brief Show some statistics for data files #Handles either ARFF-files or plain data files. If the ARFF file #contains a class attribute, it also shows statistics for the class #distribution. # # Help with <tt>--help</tt>. 
# @version $CURR$ from optparse import OptionParser import sys, os, math from tools import * parser = OptionParser() parser.set_usage('%prog [options]\n\t Gives some statistics for datasets, given either in arff or simple file format') parser.add_option("-i", "--infile", action="append", type="string", dest="infiles", help="Specifies the inputfiles to analyse.") (options,args)=parser.parse_args() if options.infiles == None: parser.parse_args(['-h']) # loop over infiles for filename in options.infiles: try: # read in files print "================= %20s =================" %(filename) ftype = isARFFFile(filename) if ftype == ARFF: dataset = readDataARFF(filename) elif ftype == SIMPLE: dataset = readDataTrivial(filename) else: sys.stderr.write("Skipping "+filename+os.linesep) continue # analyse data # header dim = len(dataset["data"]) numpoints = len(dataset["data"][0]) print "Dim (#attributes): %d"%(dim) print " %-4s %-12s %-12s %-12s %-12s %-12s" % ("Dim", "Min", "Max", "mean", "unbiased V", "samplestddev") # traverse all attributes for i in range(dim): total_sum = sum(dataset["data"][i]) mean = total_sum/float(numpoints) unbiased_variance = sum(map(lambda x: (x-mean)**2, dataset["data"][i]))/float(numpoints-1) sample_stddev = math.sqrt(unbiased_variance) print " %02d %12f %12f %12f %12f %12f" %(i+1, min(dataset["data"][i]), max(dataset["data"][i]), mean, unbiased_variance, sample_stddev) print "#data points: %d"%(numpoints) # statistics for class distribution if dataset.has_key("classes"): print "Class distribution:" class_count = {} for c in dataset["classes"]: if class_count.has_key(c): class_count[c] += 1 else: class_count[c] = 1 class_values = class_count.keys() class_values.sort() for c in class_values: print " %12f %d" % (c, class_count[c]) except Exception, e: sys.stderr.write("ERROR: Skipping "+filename+os.linesep) print " ",e
lib/pysgpp/extensions/misc/datasetAnalysis.py
4,137
Copyright (C) 2008-today The SG++ project This file is part of the SG++ project. For conditions of distribution and use, please see the copyright notice provided with SG++ or at sgpp.sparsegrids.org This file is part of SGClass, a program package making use of spatially adaptive sparse grids to solve numerical problems Copyright (C) 2007 Dirk Pflueger (pflueged@in.tum.de) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with pyclass. If not, see <http://www.gnu.org/licenses/>. @package classifier @ingroup bin @brief Show some statistics for data filesHandles either ARFF-files or plain data files. If the ARFF filecontains a class attribute, it also shows statistics for the classdistribution. Help with <tt>--help</tt>. @version $CURR$ loop over infiles read in files analyse data header traverse all attributes statistics for class distribution
1,374
en
0.848818
"""Entrypoint for the WSGI app (web API) """ from . import api application = api.create_app()
krcg_api/wsgi.py
95
Entrypoint for the WSGI app (web API)
37
en
0.682141
"""Utilties for distributed processing""" import horovod.tensorflow.keras as hvd def rank(): try: return hvd.rank() except ValueError: return 0 def barrier(): try: hvd.allreduce([], name='Barrier') except ValueError: pass
CSCS/benchmarks/cosmoflow/implementations/cosmoflow-benchmark/utils/distributed.py
273
Utilties for distributed processing
35
en
0.850731