hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9c8ecd34b3dc863a6fd76369dcf46dd316b20dd | 22,516 | py | Python | rolls/util/keyring_wrapper.py | strandedathome/rolls-blockchain | d9a813753188275c6528e21d18e86e0eadf9bd00 | [
"MIT"
] | 12 | 2021-11-14T10:47:31.000Z | 2022-02-09T04:31:52.000Z | rolls/util/keyring_wrapper.py | strandedathome/rolls-blockchain | d9a813753188275c6528e21d18e86e0eadf9bd00 | [
"MIT"
] | 13 | 2021-11-15T14:59:39.000Z | 2022-03-29T09:13:13.000Z | rolls/util/keyring_wrapper.py | strandedathome/rolls-blockchain | d9a813753188275c6528e21d18e86e0eadf9bd00 | [
"MIT"
] | 1 | 2021-11-12T20:32:49.000Z | 2021-11-12T20:32:49.000Z | import asyncio
import keyring as keyring_main
from blspy import PrivateKey # pyright: reportMissingImports=false
from rolls.util.default_root import DEFAULT_KEYS_ROOT_PATH
from rolls.util.file_keyring import FileKeyring
from rolls.util.misc import prompt_yes_no
from keyrings.cryptfile.cryptfile import CryptFileKeyring # pyright: reportMissingImports=false
from keyring.backends.macOS import Keyring as MacKeyring
from keyring.errors import KeyringError
from pathlib import Path
from sys import exit, platform
from typing import Any, List, Optional, Tuple, Type, Union
# We want to protect the keyring, even if a user-specified master passphrase isn't provided
#
# WARNING: Changing the default passphrase will prevent passphrase-less users from accessing
# their existing keys. Using a new default passphrase requires migrating existing users to
# the new passphrase.
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE = "$ rolls passphrase set # all the cool kids are doing it!"
MAC_KEYCHAIN_MASTER_PASSPHRASE_SERVICE = "PecanRolls Passphrase"
MAC_KEYCHAIN_MASTER_PASSPHRASE_USER = "PecanRolls Passphrase"
def check_macos_keychain_keys_present(mac_keychain: MacKeyring) -> bool:
    """Return True if any rolls private-key entry is stored in the macOS Keychain."""
    from keyring.credentials import SimpleCredential
    from rolls.util.keychain import default_keychain_user, default_keychain_service, get_private_key_user, MAX_KEYS

    base_user: str = default_keychain_user()
    service: str = default_keychain_service()
    # Probe every potential key slot; the first stored credential proves keys exist.
    for slot in range(MAX_KEYS):
        slot_user: str = get_private_key_user(base_user, slot)
        found: Optional[SimpleCredential] = mac_keychain.get_credential(service, slot_user)
        if found is not None:
            return True
    return False
def warn_if_macos_errSecInteractionNotAllowed(error: KeyringError):
    """
    Print a warning when a macOS Keychain failure is errSecInteractionNotAllowed
    (-25308). This commonly happens in headless sessions (e.g. remoting into a
    Mac via SSH): Keychain operations may need to prompt for login credentials,
    which requires a connection to the WindowServer.
    """
    message = str(error)
    if "-25308" not in message:
        return
    print(
        "WARNING: Unable to access the macOS Keychain (-25308 errSecInteractionNotAllowed). "
        "Are you logged-in remotely?"
    )
class KeyringWrapper:
    """
    KeyringWrapper provides an abstraction that the Keychain class can use
    without requiring knowledge of the keyring backend. During initialization,
    a keyring backend is selected based on the OS.
    The wrapper is implemented as a singleton, as it may need to manage state
    related to the master passphrase and handle migration from the legacy
    CryptFileKeyring implementation.
    """

    # Static members
    __shared_instance = None  # singleton instance (see get_shared_instance/cleanup_shared_instance)
    __keys_root_path: Path = DEFAULT_KEYS_ROOT_PATH  # root path used when the singleton is first created

    # Instance members
    keys_root_path: Path  # where the keyring files live for this instance
    keyring: Union[Any, FileKeyring] = None  # active backend, chosen per-OS in _configure_backend
    cached_passphrase: Optional[str] = None  # in-memory master passphrase (may be the default placeholder)
    cached_passphrase_is_validated: bool = False  # whether cached_passphrase has been checked against the keyring
    legacy_keyring = None  # pre-migration backend (CryptFileKeyring or macOS Keychain), if one was detected
def __init__(self, keys_root_path: Path = DEFAULT_KEYS_ROOT_PATH):
    """
    Initializes the keyring backend based on the OS. For Linux, we previously
    used CryptFileKeyring. We now use our own FileKeyring backend and migrate
    the data from the legacy CryptFileKeyring (on write).
    """
    self.keys_root_path = keys_root_path
    self.refresh_keyrings()

def refresh_keyrings(self):
    """(Re)select the active backend, detect any legacy keyring, and prime the passphrase cache."""
    self.keyring = None
    self.keyring = self._configure_backend()

    # Configure the legacy keyring if keyring passphrases are supported to support migration (if necessary)
    self.legacy_keyring = self._configure_legacy_backend()

    # Initialize the cached_passphrase
    self.cached_passphrase = self._get_initial_cached_passphrase()
def _configure_backend(self) -> Union[Any, FileKeyring]:
    """
    Select and return the keyring backend for the current platform.

    Windows/cygwin: configures WinVaultKeyring via the keyring module and
    returns the keyring module itself (the local name ``keyring`` is bound
    by the import inside that branch and returned at the end).
    macOS/Linux: FileKeyring when passphrase support is available, otherwise
    the native macOS Keychain / CryptFileKeyring respectively.
    Any other platform: the keyring module's default backend.

    Raises:
        Exception: if a backend has already been configured for this instance.
    """
    from rolls.util.keychain import supports_keyring_passphrase

    if self.keyring:
        raise Exception("KeyringWrapper has already been instantiated")

    if platform == "win32" or platform == "cygwin":
        import keyring.backends.Windows  # binds the local name "keyring", returned below

        keyring.set_keyring(keyring.backends.Windows.WinVaultKeyring())
        # TODO: New keyring + passphrase support can be enabled for Windows by updating
        # supports_keyring_passphrase() and uncommenting the lines below. Leaving the
        # lines below in place for testing.
        #
        # if supports_keyring_passphrase():
        #     keyring = FileKeyring(keys_root_path=self.keys_root_path)  # type: ignore
        # else:
        #     keyring.set_keyring(keyring.backends.Windows.WinVaultKeyring())
    elif platform == "darwin":
        if supports_keyring_passphrase():
            keyring = FileKeyring(keys_root_path=self.keys_root_path)  # type: ignore
        else:
            keyring = MacKeyring()  # type: ignore
            keyring_main.set_keyring(keyring)
    elif platform == "linux":
        if supports_keyring_passphrase():
            keyring = FileKeyring(keys_root_path=self.keys_root_path)  # type: ignore
        else:
            keyring = CryptFileKeyring()
            keyring.keyring_key = "your keyring password"  # type: ignore
    else:
        # Unknown platform: fall back to the keyring module's default backend
        keyring = keyring_main

    return keyring
def _configure_legacy_backend(self) -> Optional[Union[CryptFileKeyring, MacKeyring]]:
    """
    Detect a legacy keyring whose contents still need to be migrated.

    Returns the legacy backend (CryptFileKeyring on Linux, MacKeyring on
    macOS) when the new FileKeyring is empty and legacy data appears to
    exist; returns None otherwise.
    """
    # If keyring.yaml isn't found or is empty, check if we're using CryptFileKeyring or the Mac Keychain
    filekeyring = self.keyring if type(self.keyring) == FileKeyring else None
    if filekeyring and not filekeyring.has_content():
        if platform == "linux":
            old_keyring = CryptFileKeyring()
            if Path(old_keyring.file_path).is_file():
                # After migrating content from legacy_keyring, we'll prompt to clear those keys
                old_keyring.keyring_key = "your keyring password"  # type: ignore
                return old_keyring
        elif platform == "darwin":
            mac_keychain: MacKeyring = MacKeyring()
            if check_macos_keychain_keys_present(mac_keychain):
                return mac_keychain
    return None
def _get_initial_cached_passphrase(self) -> str:
    """Seed the passphrase cache: prefer the OS credential store, else the default placeholder."""
    from rolls.util.keychain import supports_os_passphrase_storage

    stored: Optional[str] = (
        self.get_master_passphrase_from_credential_store() if supports_os_passphrase_storage() else None
    )
    if stored is not None:
        return stored
    return DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
@staticmethod
def set_keys_root_path(keys_root_path: Path):
    """
    Used to set the keys_root_path prior to instantiating the __shared_instance
    """
    KeyringWrapper.__keys_root_path = keys_root_path

@staticmethod
def get_shared_instance(create_if_necessary=True):
    # Lazily create the singleton using the configured keys root path.
    if not KeyringWrapper.__shared_instance and create_if_necessary:
        KeyringWrapper.__shared_instance = KeyringWrapper(keys_root_path=KeyringWrapper.__keys_root_path)
    return KeyringWrapper.__shared_instance

@staticmethod
def cleanup_shared_instance():
    # Drop the singleton; the next get_shared_instance() call recreates it.
    KeyringWrapper.__shared_instance = None

def get_keyring(self):
    """
    Return the current keyring backend. The legacy keyring is preferred if it's in use
    """
    return self.keyring if not self.using_legacy_keyring() else self.legacy_keyring

def using_legacy_keyring(self) -> bool:
    # True while pre-migration keys are still being served by the old backend.
    return self.legacy_keyring is not None

# Master passphrase support

def keyring_supports_master_passphrase(self) -> bool:
    # Only the FileKeyring backend supports master-passphrase protection.
    return type(self.get_keyring()) in [FileKeyring]
def get_cached_master_passphrase(self) -> Tuple[Optional[str], bool]:
    """
    Return the currently cached master passphrase together with a flag
    indicating whether that passphrase has previously been validated.
    """
    return (self.cached_passphrase, self.cached_passphrase_is_validated)

def set_cached_master_passphrase(self, passphrase: Optional[str], validated=False) -> None:
    """
    Replace the cached master passphrase and record whether it has been
    validated against the keyring.
    """
    self.cached_passphrase_is_validated = validated
    self.cached_passphrase = passphrase
def has_cached_master_passphrase(self) -> bool:
    """
    Return True when a non-empty master passphrase is cached in memory.

    Bug fix: get_cached_master_passphrase() returns a (passphrase, validated)
    tuple. The previous code tested the tuple itself, which is never None and
    always has len 2, so this method unconditionally returned True. Only the
    passphrase component is examined now.
    """
    passphrase, _ = self.get_cached_master_passphrase()
    return passphrase is not None and len(passphrase) > 0

def has_master_passphrase(self) -> bool:
    """
    Returns a bool indicating whether the underlying keyring data
    is secured by a master passphrase.
    """
    return self.keyring_supports_master_passphrase() and self.keyring.has_content()

def master_passphrase_is_valid(self, passphrase: str, force_reload: bool = False) -> bool:
    """Check the candidate passphrase against the keyring, optionally reloading it from disk first."""
    return self.keyring.check_passphrase(passphrase, force_reload=force_reload)
def set_master_passphrase(
    self,
    current_passphrase: Optional[str],
    new_passphrase: str,
    *,
    write_to_keyring: bool = True,
    allow_migration: bool = True,
    save_passphrase: bool = False,
) -> None:
    """
    Sets a new master passphrase for the keyring.

    Args:
        current_passphrase: the existing passphrase; validated only when
            provided (None skips the validity check).
        new_passphrase: the passphrase to switch to (cached as validated).
        write_to_keyring: when True, re-encrypt the keyring contents (or
            trigger legacy migration first if one is in use).
        allow_migration: when False and a legacy keyring is active, raise
            instead of migrating.
        save_passphrase: when True, also persist the passphrase to the OS
            credential store (where supported); otherwise remove it.

    Raises:
        KeyringCurrentPassphraseIsInvalid: current_passphrase was given but wrong.
        KeyringRequiresMigration: legacy keyring in use and migration disallowed.
    """
    from rolls.util.keychain import (
        KeyringCurrentPassphraseIsInvalid,
        KeyringRequiresMigration,
        supports_os_passphrase_storage,
    )

    # Require a valid current_passphrase
    if (
        self.has_master_passphrase()
        and current_passphrase is not None
        and not self.master_passphrase_is_valid(current_passphrase)
    ):
        raise KeyringCurrentPassphraseIsInvalid("invalid current passphrase")

    self.set_cached_master_passphrase(new_passphrase, validated=True)

    if write_to_keyring:
        # We'll migrate the legacy contents to the new keyring at this point
        if self.using_legacy_keyring():
            if not allow_migration:
                raise KeyringRequiresMigration("keyring requires migration")
            self.migrate_legacy_keyring_interactive()
        else:
            # We're reencrypting the keyring contents using the new passphrase. Ensure that the
            # payload has been decrypted by calling load_keyring with the current passphrase.
            self.keyring.load_keyring(passphrase=current_passphrase)
            self.keyring.write_keyring(fresh_salt=True)  # Create a new salt since we're changing the passphrase

    if supports_os_passphrase_storage():
        if save_passphrase:
            self.save_master_passphrase_to_credential_store(new_passphrase)
        else:
            self.remove_master_passphrase_from_credential_store()
def remove_master_passphrase(self, current_passphrase: Optional[str]) -> None:
    """
    Remove the user-specific master passphrase. We still keep the keyring contents encrypted
    using the default passphrase.
    """
    # Switching to the default placeholder passphrase effectively "removes" the user's passphrase.
    self.set_master_passphrase(current_passphrase, DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE)
def save_master_passphrase_to_credential_store(self, passphrase: str) -> None:
    """Persist the master passphrase to the OS credential store (macOS Keychain only); no-op elsewhere."""
    if platform == "darwin":
        mac_keychain = MacKeyring()
        try:
            mac_keychain.set_password(
                MAC_KEYCHAIN_MASTER_PASSPHRASE_SERVICE, MAC_KEYCHAIN_MASTER_PASSPHRASE_USER, passphrase
            )
        except KeyringError as e:
            # Headless sessions can't prompt for Keychain access; warn instead of failing
            warn_if_macos_errSecInteractionNotAllowed(e)
    return None

def remove_master_passphrase_from_credential_store(self) -> None:
    """Remove the saved master passphrase from the OS credential store (macOS Keychain only)."""
    if platform == "darwin":
        mac_keychain = MacKeyring()
        try:
            mac_keychain.delete_password(
                MAC_KEYCHAIN_MASTER_PASSPHRASE_SERVICE, MAC_KEYCHAIN_MASTER_PASSPHRASE_USER
            )
        except KeyringError as e:
            warn_if_macos_errSecInteractionNotAllowed(e)
    return None

def get_master_passphrase_from_credential_store(self) -> Optional[str]:
    """Return the saved master passphrase from the OS credential store, or None if unavailable."""
    if platform == "darwin":
        mac_keychain = MacKeyring()
        try:
            return mac_keychain.get_password(
                MAC_KEYCHAIN_MASTER_PASSPHRASE_SERVICE, MAC_KEYCHAIN_MASTER_PASSPHRASE_USER
            )
        except KeyringError as e:
            warn_if_macos_errSecInteractionNotAllowed(e)
    return None
# Legacy keyring migration

class MigrationResults:
    """Snapshot captured during legacy-keyring migration, used later for verification and cleanup."""

    def __init__(
        self,
        original_private_keys: List[Tuple[PrivateKey, bytes]],
        legacy_keyring: Any,
        keychain_service: str,
        keychain_users: List[str],
    ):
        # Keys read from the legacy keyring before migration (used for before/after comparison)
        self.original_private_keys = original_private_keys
        # The legacy backend itself (retained so it can be restored on failure or cleaned up on success)
        self.legacy_keyring = legacy_keyring
        # Keychain service name the entries were stored under
        self.keychain_service = keychain_service
        # Keychain user names that were migrated
        self.keychain_users = keychain_users
def confirm_migration(self) -> bool:
    """
    Before beginning migration, we'll notify the user that the legacy keyring needs to be
    migrated and warn about backing up the mnemonic seeds.
    If a master passphrase hasn't been explicitly set yet, we'll attempt to prompt and set
    the passphrase prior to beginning migration.

    Returns True if the user elects to proceed with migration.
    """
    master_passphrase, _ = self.get_cached_master_passphrase()
    if master_passphrase == DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE:
        # No user passphrase yet: offer to set one (cached only; written during migration)
        print(
            "\nYour existing keys need to be migrated to a new keyring that is optionally secured by a master "
            "passphrase."
        )
        print(
            "Would you like to set a master passphrase now? Use 'rolls passphrase set' to change the passphrase.\n"
        )

        response = prompt_yes_no("Set keyring master passphrase? (y/n) ")
        if response:
            from rolls.cmds.passphrase_funcs import prompt_for_new_passphrase

            # Prompt for a master passphrase and cache it
            new_passphrase, save_passphrase = prompt_for_new_passphrase()
            self.set_master_passphrase(
                current_passphrase=None,
                new_passphrase=new_passphrase,
                write_to_keyring=False,
                save_passphrase=save_passphrase,
            )
        else:
            print(
                "Will skip setting a master passphrase. Use 'rolls passphrase set' to set the master passphrase.\n"
            )
    else:
        import colorama

        colorama.init()

        print("\nYour existing keys will be migrated to a new keyring that is secured by your master passphrase")
        print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + "WARNING: " + colorama.Style.RESET_ALL, end="")
        print(
            "It is strongly recommended that you ensure you have a copy of the mnemonic seed for each of your "
            "keys prior to beginning migration\n"
        )

    return prompt_yes_no("Begin keyring migration? (y/n) ")
def migrate_legacy_keys(self) -> MigrationResults:
    """
    Copy all key entries from the legacy keyring into the new keyring
    (self.keyring) and return a MigrationResults snapshot for verification.
    """
    from rolls.util.keychain import get_private_key_user, Keychain, MAX_KEYS

    print("Migrating contents from legacy keyring")

    keychain: Keychain = Keychain()

    # Obtain contents from the legacy keyring. When using the Keychain interface
    # to read, the legacy keyring will be preferred over the new keyring.
    original_private_keys = keychain.get_all_private_keys()
    service = keychain.service
    user_passphrase_pairs = []
    index = 0
    user = get_private_key_user(keychain.user, index)
    # NOTE(review): "<=" walks MAX_KEYS + 1 slots (0..MAX_KEYS inclusive), one more than
    # check_macos_keychain_keys_present's range(0, MAX_KEYS) — confirm this is intentional.
    while index <= MAX_KEYS:
        # Build up a list of user/passphrase tuples from the legacy keyring contents
        if user is not None:
            passphrase = self.get_passphrase(service, user)

        if passphrase is not None:
            user_passphrase_pairs.append((user, passphrase))

        index += 1
        user = get_private_key_user(keychain.user, index)

    # Write the keys directly to the new keyring (self.keyring)
    for (user, passphrase) in user_passphrase_pairs:
        self.keyring.set_password(service, user, passphrase)

    return KeyringWrapper.MigrationResults(
        original_private_keys, self.legacy_keyring, service, [user for (user, _) in user_passphrase_pairs]
    )
def verify_migration_results(self, migration_results: MigrationResults) -> bool:
    """
    Compare the keyring contents from before and after migration.

    Disables the legacy keyring so reads hit the new backend, then compares
    the two key sets (sorted). On mismatch or any error the legacy keyring
    is restored and the exception re-raised; returns True only on success.
    """
    from rolls.util.keychain import Keychain

    # Stop using the legacy keyring. This will direct subsequent reads to the new keyring.
    self.legacy_keyring = None
    success: bool = False
    print("Verifying migration results...", end="")

    # Compare the original keyring contents with the new
    try:
        keychain: Keychain = Keychain()
        original_private_keys = migration_results.original_private_keys
        post_migration_private_keys = keychain.get_all_private_keys()

        # Sort the key collections prior to comparing
        original_private_keys.sort(key=lambda e: str(e[0]))
        post_migration_private_keys.sort(key=lambda e: str(e[0]))
        if post_migration_private_keys == original_private_keys:
            success = True
            print(" Verified")
        else:
            print(" Failed")
            raise ValueError("Migrated keys don't match original keys")
    except Exception as e:
        print(f"\nMigration failed: {e}")
        print("Leaving legacy keyring intact")
        self.legacy_keyring = migration_results.legacy_keyring  # Restore the legacy keyring
        raise e

    return success
def confirm_legacy_keyring_cleanup(self, migration_results) -> bool:
    """
    Ask the user whether keys should be removed from the legacy keyring. With
    CryptFileKeyring we can't just delete the whole file, because other python
    processes might use the same keyring file.
    """
    legacy = migration_results.legacy_keyring
    backend_type: Type = type(legacy)
    if backend_type is CryptFileKeyring:
        display_name = str(legacy.file_path)
    elif backend_type is MacKeyring:
        display_name = "macOS Keychain"
    else:
        display_name = ""
    # leaving this here for when Windows migration is supported
    # elif backend_type is Win32Keyring:
    #     display_name = "Windows Credential Manager"

    suffix = f" ({display_name})?" if display_name else "?"
    return prompt_yes_no("Remove keys from old keyring" + suffix + " (y/n) ")
def cleanup_legacy_keyring(self, migration_results: MigrationResults):
    """Delete each migrated key entry from the legacy keyring."""
    for user in migration_results.keychain_users:
        migration_results.legacy_keyring.delete_password(migration_results.keychain_service, user)

def migrate_legacy_keyring(self, cleanup_legacy_keyring: bool = False):
    """Non-interactive migration: copy keys, verify them, and optionally clean up the legacy keyring."""
    results = self.migrate_legacy_keys()
    success = self.verify_migration_results(results)

    if success and cleanup_legacy_keyring:
        self.cleanup_legacy_keyring(results)
def migrate_legacy_keyring_interactive(self):
    """
    Handle importing keys from the legacy keyring into the new keyring.
    Prior to beginning, we'll ensure that we at least suggest setting a master passphrase
    and backing up mnemonic seeds. After importing keys from the legacy keyring, we'll
    perform a before/after comparison of the keyring contents, and on success we'll prompt
    to cleanup the legacy keyring.

    Exits the process if the user declines migration (exit 0) or migration fails (exit 1).
    """
    from rolls.cmds.passphrase_funcs import async_update_daemon_migration_completed_if_running

    # Make sure the user is ready to begin migration.
    response = self.confirm_migration()
    if not response:
        print("Skipping migration. Unable to proceed")
        exit(0)

    try:
        results = self.migrate_legacy_keys()
        success = self.verify_migration_results(results)

        if success:
            print(f"Keyring migration completed successfully ({str(self.keyring.keyring_path)})\n")
    except Exception as e:
        print(f"\nMigration failed: {e}")
        print("Leaving legacy keyring intact")
        exit(1)

    # Ask if we should clean up the legacy keyring
    if self.confirm_legacy_keyring_cleanup(results):
        self.cleanup_legacy_keyring(results)
        print("Removed keys from old keyring")
    else:
        print("Keys in old keyring left intact")

    # Notify the daemon (if running) that migration has completed
    asyncio.get_event_loop().run_until_complete(async_update_daemon_migration_completed_if_running())
# Keyring interface

def get_passphrase(self, service: str, user: str) -> str:
    """Read a stored passphrase, preferring the legacy keyring while it is still in use."""
    # Continue reading from the legacy keyring until we want to write something,
    # at which point we'll migrate the legacy contents to the new keyring
    if self.using_legacy_keyring():
        return self.legacy_keyring.get_password(service, user)  # type: ignore
    return self.get_keyring().get_password(service, user)

def set_passphrase(self, service: str, user: str, passphrase: str):
    """Store a passphrase, triggering interactive legacy migration first if needed."""
    # On the first write while using the legacy keyring, we'll start migration
    if self.using_legacy_keyring() and self.has_cached_master_passphrase():
        self.migrate_legacy_keyring_interactive()
    self.get_keyring().set_password(service, user, passphrase)

def delete_passphrase(self, service: str, user: str):
    """Delete a stored passphrase, triggering interactive legacy migration first if needed."""
    # On the first write while using the legacy keyring, we'll start migration
    if self.using_legacy_keyring() and self.has_cached_master_passphrase():
        self.migrate_legacy_keyring_interactive()
    self.get_keyring().delete_password(service, user)
| 41.851301 | 119 | 0.670545 |
f9a671fe812ccb1d4ffa0238fda1b7225252fa46 | 22,140 | py | Python | openprocurement/blade/tests/contracts.py | imaginal/openprocurement.blade | 4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb | [
"Apache-2.0"
] | null | null | null | openprocurement/blade/tests/contracts.py | imaginal/openprocurement.blade | 4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb | [
"Apache-2.0"
] | null | null | null | openprocurement/blade/tests/contracts.py | imaginal/openprocurement.blade | 4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from uuid import uuid4
from copy import deepcopy
from openprocurement.api.models import get_now
from openprocurement.edge.tests.base import ContractBaseWebTest, test_contract_data, test_document, ROUTE_PREFIX
@unittest.skipUnless(test_contract_data, "Contracts is not reachable")
class ContractResourceTest(ContractBaseWebTest):
def test_empty_listing(self):
response = self.app.get('/contracts')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/contracts?opt_jsonp=callback')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/contracts?opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/contracts?opt_jsonp=callback&opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/contracts?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
response = self.app.get('/contracts?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/contracts?feed=changes&offset=0', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'}
])
response = self.app.get('/contracts?feed=changes&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
def test_listing(self):
response = self.app.get('/contracts')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
contracts = []
for i in range(3):
offset = get_now().isoformat()
contracts.append(self.create_contract())
ids = ','.join([i['id'] for i in contracts])
while True:
response = self.app.get('/contracts')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in contracts]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in contracts]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in contracts]))
while True:
response = self.app.get('/contracts?offset={}'.format(offset))
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/contracts?limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/contracts', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/contracts', params=[('opt_fields', 'status,contractID')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'contractID']))
self.assertIn('opt_fields=status%2CcontractID', response.json['next_page']['uri'])
response = self.app.get('/contracts?descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in contracts]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in contracts], reverse=True))
response = self.app.get('/contracts?descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_contract_data2 = test_contract_data.copy()
test_contract_data2['mode'] = 'test'
self.create_contract(test_contract_data2)
while True:
response = self.app.get('/contracts?mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/contracts?mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_changes(self):
response = self.app.get('/contracts?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
contracts = []
for i in range(3):
contracts.append(self.create_contract())
ids = ','.join([i['id'] for i in contracts])
while True:
response = self.app.get('/contracts?feed=changes')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in contracts]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in contracts]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in contracts]))
response = self.app.get('/contracts?feed=changes&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/contracts?feed=changes', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/contracts?feed=changes', params=[('opt_fields', 'status,contractID')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'contractID']))
self.assertIn('opt_fields=status%2CcontractID', response.json['next_page']['uri'])
response = self.app.get('/contracts?feed=changes&descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in contracts]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in contracts], reverse=True))
response = self.app.get('/contracts?feed=changes&descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_contract_data2 = test_contract_data.copy()
test_contract_data2['mode'] = 'test'
self.create_contract(test_contract_data2)
while True:
response = self.app.get('/contracts?feed=changes&mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/contracts?feed=changes&mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
    def test_listing_draft(self):
        """Contracts created with status 'draft' still show up in the listing feed."""
        response = self.app.get('/contracts')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), 0)
        contracts = []
        data = test_contract_data.copy()
        data.update({'status': 'draft'})
        for i in range(3):
            contracts.append(self.create_contract(data))
        ids = ','.join([i['id'] for i in contracts])
        # Poll until the (eventually consistent) listing view reflects all three.
        while True:
            response = self.app.get('/contracts')
            self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
            if len(response.json['data']) == 3:
                break
        self.assertEqual(len(response.json['data']), 3)
        self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
        self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in contracts]))
        self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in contracts]))
        # Listing must be ordered by dateModified ascending.
        self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in contracts]))
    def test_get_contract(self):
        """GET a single contract in plain JSON, JSONP and pretty-printed variants."""
        contract = self.create_contract()
        response = self.app.get('/contracts/{}'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertDictEqual(response.json['data'], contract)
        # opt_jsonp wraps the payload in the given callback and switches content type.
        response = self.app.get('/contracts/{}?opt_jsonp=callback'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/javascript')
        self.assertIn('callback({"data": {"', response.body)
        # opt_pretty indents the JSON body.
        response = self.app.get('/contracts/{}?opt_pretty=1'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('{\n "data": {\n "', response.body)
    def test_contract_not_found(self):
        """Unknown ids and non-Contract stored documents must produce 404 errors."""
        response = self.app.get('/contracts')
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), 0)
        response = self.app.get('/contracts/some_id', status=404)
        self.assertEqual(response.status, '404 Not Found')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Not Found', u'location': u'url', u'name': u'contract_id'}
        ])
        # PATCH on a missing id must fail the same way as GET.
        response = self.app.patch_json(
            '/contracts/some_id', {'data': {}}, status=404)
        self.assertEqual(response.status, '404 Not Found')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Not Found', u'location': u'url', u'name': u'contract_id'}
        ])
        # put custom document object into database to check contract construction on non-Contract data
        data = {'contract': 'test', '_id': uuid4().hex}
        self.db.save(data)
        response = self.app.get('/contracts/{}'.format(data['_id']), status=404)
        self.assertEqual(response.status, '404 Not Found')
@unittest.skipUnless(test_contract_data, "Contracts is not reachable")
class ContractDocumentResourceTest(ContractBaseWebTest):
    """Read-only tests for the /contracts/{id}/documents collection."""

    def test_listing(self):
        """List a contract's documents; check JSONP and pretty-print options."""
        contract = self.create_contract()
        response = self.app.get('/contracts/{}/documents'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['data'], contract['documents'])
        self.assertNotIn('{\n "', response.body)
        self.assertNotIn('callback({', response.body)
        response = self.app.get('/contracts/{}/documents?opt_jsonp=callback'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/javascript')
        self.assertNotIn('{\n "', response.body)
        self.assertIn('callback({', response.body)
        response = self.app.get('/contracts/{}/documents?opt_pretty=1'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('{\n "', response.body)
        self.assertNotIn('callback({', response.body)
        # Both options combined: pretty JSON inside a JSONP callback.
        response = self.app.get('/contracts/{}/documents?opt_jsonp=callback&opt_pretty=1'.format(contract['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/javascript')
        self.assertIn('{\n "', response.body)
        self.assertIn('callback({', response.body)

    def test_listing_changes(self):
        """Documents added directly to the DB appear in the listing, ordered by dateModified."""
        contract = self.create_contract()
        data = self.db[contract['id']]
        documents = data['documents']
        for i in range(3):
            document = deepcopy(test_document)
            document['dateModified'] = get_now().isoformat()
            document['id'] = uuid4().hex
            documents.append(document)
        self.db.save(data)
        ids = ','.join([i['id'] for i in documents])
        response = self.app.get('/contracts/{}/documents'.format(contract['id']))
        self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(len(response.json['data']), len(documents))
        self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in documents]))
        self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in documents]))
        self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in documents]))

    def test_get_document(self):
        """GET a single document in plain JSON, JSONP and pretty-printed variants."""
        contract = self.create_contract()
        document = contract['documents'][0]
        response = self.app.get('/contracts/{}/documents/{}'.format(contract['id'], document['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertDictEqual(response.json['data'], document)
        response = self.app.get('/contracts/{}/documents/{}?opt_jsonp=callback'.format(contract['id'], document['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/javascript')
        self.assertIn('callback({"data": {"', response.body)
        response = self.app.get('/contracts/{}/documents/{}?opt_pretty=1'.format(contract['id'], document['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertIn('{\n "data": {\n "', response.body)

    def test_document_not_found(self):
        """An unknown document id must yield a structured 404 error."""
        contract = self.create_contract()
        response = self.app.get('/contracts/{}/documents/some_id'.format(contract['id']), status=404)
        self.assertEqual(response.status, '404 Not Found')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.json['status'], 'error')
        self.assertEqual(response.json['errors'], [
            {u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
        ])

    def test_get_document_with_versions(self):
        """Re-uploading a document under the same id exposes earlier URLs as previousVersions."""
        contract = self.create_contract()
        data = self.db[contract['id']]
        documents = data['documents']
        for i in range(3):
            document = deepcopy(test_document)
            document['id'] = data['documents'][0]['id']
            document['url'] += str(i)
            document['dateModified'] = get_now().isoformat()
            documents.append(document)
        self.db.save(data)
        # Every stored revision except the latest becomes a previous version.
        versions = [{'dateModified': i['dateModified'], 'url': i['url']} for i in documents[:-1]]
        response = self.app.get('/contracts/{}/documents/{}'.format(contract['id'], document['id']))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(len(response.json['data']['previousVersions']), len(versions))
        self.assertEqual(response.json['data']['previousVersions'], versions)
def suite():
    """Aggregate the contract resource test cases into one test suite.

    Uses ``unittest.TestLoader`` instead of ``unittest.makeSuite``, which is
    deprecated since Python 3.11 and removed in Python 3.13.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(ContractResourceTest))
    suite.addTest(loader.loadTestsFromTestCase(ContractDocumentResourceTest))
    return suite
# Allow running this module directly; executes the aggregated suite() above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 48.340611 | 138 | 0.636766 |
cb90fdde8ea51dfd6cf1e4052a6ce364954772be | 6,268 | py | Python | util/util.py | xieyulai/MSP-STTN-DENSITY | 67ee725934530e579534774b6b66e44846c415b4 | [
"MIT"
] | 1 | 2022-03-10T11:23:14.000Z | 2022-03-10T11:23:14.000Z | util/util.py | xieyulai/MSP-STTN | 4f986b40fb0f3c292dcb6e186ed9b8aba1f7306b | [
"MIT"
] | null | null | null | util/util.py | xieyulai/MSP-STTN | 4f986b40fb0f3c292dcb6e186ed9b8aba1f7306b | [
"MIT"
] | null | null | null | import time
import math
import yaml
import torch
import cv2
import pandas as pd
import datetime
import tqdm
import pickle
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def timeSince(since):
    """Return the wall-clock time elapsed since *since* as a 'Xm Ys' string.

    *since* is a timestamp previously obtained from ``time.time()``.
    """
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
def random_mask(H, W):
    """Sample a random rectangle of side length >= 30 inside an H x W frame.

    Returns the rectangle bounds as a (top, bottom, left, right) tuple.
    The four random draws happen in the same order as before (left, top,
    right, bottom), so seeded runs remain reproducible.
    """
    min_side = 30
    left = random.randint(0, W - min_side)
    top = random.randint(0, H - min_side)
    right = random.randint(left + min_side, W)
    bottom = random.randint(top + min_side, H)
    return (top, bottom, left, right)
def get_yaml_data(yaml_file):
    """Parse a YAML file and return its contents as Python objects.

    Fixes: uses ``yaml.safe_load`` instead of a bare ``yaml.load`` call,
    which (a) removes the missing-Loader deprecation warning the old code
    worked around and (b) refuses to construct arbitrary Python objects
    from untrusted files. The file handle is managed with ``with`` so it
    is closed even if parsing raises.
    """
    with open(yaml_file, 'r', encoding="utf-8") as file:
        return yaml.safe_load(file.read())
def loadding_mask(input, path, size=(160, 120)):
    """Load a binary mask image and broadcast it to the shape of *input*.

    The image at *path* is converted to grayscale, resized to *size*
    (width, height), binarized (255 -> 1) and expanded to input's
    (N, T, C, H, W) shape on input's device.

    Fixes: removed the stray debug ``print`` of a tensor slice that
    flooded stdout on every call.

    Returns:
        (hole, valid, single_valid): ``valid`` is the float mask (1 inside
        the masked region), ``hole`` its complement, and ``single_valid``
        is the same tensor as ``valid`` (kept for interface compatibility
        with getting_mask).
    """
    # input is expected to be shaped like [N, T, C, H, W], e.g. [64, 5, 1, 120, 160]
    mask = cv2.imread(path)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    mask = cv2.resize(mask, size)
    mask = torch.from_numpy(mask)
    h, w = mask.shape
    mask = mask // 255  # binarize: white pixels become 1
    mask = mask.reshape(1, h, w)
    mask = mask.expand_as(input)
    mask = mask.to(input.device)
    mask = mask.float()
    valid = mask
    hole = 1 - mask
    single_valid = mask
    return hole, valid, single_valid
def getting_mask(input, mask_t, mask_b, mask_l, mask_r, is_random=False):
    """Build hole/valid masks for a rectangular region of *input*.

    Args:
        input: tensor of shape (N, T, C, H, W).
        mask_t, mask_b, mask_l, mask_r: top/bottom/left/right bounds of the
            masked rectangle; ignored when *is_random* is True, in which case
            a random rectangle is drawn via ``random_mask``.

    Returns:
        (hole, valid, single_valid): ``valid`` is 1 inside the rectangle and
        0 elsewhere (same shape as input), ``hole`` is its complement, and
        ``single_valid`` is a single-channel (N, T, 1, H, W) copy of the
        rectangle mask, moved to input's device.
    """
    N, T, C, H, W = input.shape
    if is_random:
        mask_t, mask_b, mask_l, mask_r = random_mask(H, W)
    region = torch.zeros_like(input)
    region[:, :, :, mask_t:mask_b, mask_l:mask_r] = 1
    single_valid = torch.zeros(N, T, 1, H, W)
    single_valid[:, :, :, mask_t:mask_b, mask_l:mask_r] = 1
    single_valid = single_valid.to(input.device)
    return 1 - region, region, single_valid
def initialize_hole(INITIALIZATION, C, MEAN, seq, hole, valid, avg_hole):
    """Fill the masked ('valid') region of *seq* according to the chosen scheme.

    Schemes: 'mean' fills with the dataset mean (per-channel for C == 3),
    'avg_hole' with the provided average-hole tensor, 'white' with ones,
    'black' with zeros. Any other value leaves *seq* unchanged.
    """
    if INITIALIZATION == 'mean':
        if C == 3:
            # Scale each RGB channel of the mask by its per-channel mean.
            valid = valid.transpose(1, 2)
            for ch in range(3):
                valid[:, ch, :, :, :] = valid[:, ch, :, :, :] * MEAN[ch]
            valid = valid.transpose(1, 2)
        elif C == 1:
            valid = valid * MEAN
        seq = seq * hole + valid
    elif INITIALIZATION == 'avg_hole':
        seq = seq * hole + valid * avg_hole
    elif INITIALIZATION == 'white':
        seq = seq * hole + valid
    elif INITIALIZATION == 'black':
        seq = seq * hole
    return seq
def cv_show(img, H, W, C, name='img'):
    """Display a float RGB image (values in [0, 1]) in an OpenCV window for ~3 s.

    H, W and C are accepted for call-site symmetry but are not used here.
    """
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img = (img * 255).astype(np.uint8)
    cv2.imshow(name, img)
    cv2.waitKey(3000)
def cat_input_valid(input, single_valid):
    """Append the single-channel validity mask to *input* as an extra channel.

    Both tensors are transposed so concatenation happens along the channel
    axis, then transposed back. The trailing cv_show call is a debug
    visualization of the first sample's first three channels.
    """
    input = input.transpose(1, 2)
    single_valid = single_valid.transpose(1, 2)
    input = torch.cat([input, single_valid], dim=1)
    input = input.transpose(1, 2)
    # NOTE(review): debug display below blocks for ~3 s per call and
    # hard-codes H=64, W=96 — presumably leftover instrumentation; confirm
    # before using this in a training loop.
    img = input[0, 0, 0:3, :, :]
    img = img.cpu().detach().numpy()
    H = 64
    W = 96
    cv_show(img, H=H, W=W, C=3)
    return input
# with open('../data/HOLE_MESH_LIST.pkl', 'rb') as f:
# HOLE_MESH_LIST = pickle.load(f)
# Number of 30-minute slots before noon in a 48-slot day; PM gets the rest.
AM_COUNT = 24
PM_COUNT = 48 - AM_COUNT
def output_csv(output, DATE, csv_file, NORMAL_MAX, MODE='HOLE'):
    """Denormalize a predicted density tensor and dump it as a per-mesh CSV.

    Args:
        output: tensor/array of shape (T, H, W, C) with values normalized
            to [0, 1]; only the first 32x32 spatial cells are written.
        DATE: 'YYYY-MM-DD' string; rows start at midnight, or at noon when
            MODE == 'PM' (the AM half of a 48-slot day is dropped).
        csv_file: destination path for the CSV.
        NORMAL_MAX: normalization maximum used to recover absolute counts.
        MODE: 'HOLE' or 'PM'; 'PM' additionally sanity-checks the row count.
    """
    if MODE == 'PM':
        START_TIME = '{} 12:00:00'.format(DATE)
        # A full 48-slot day keeps only the afternoon half.
        if len(output) == 48:
            output = output[AM_COUNT:]
        if len(output) == PM_COUNT:
            pass
    else:
        START_TIME = '{} 00:00:00'.format(DATE)
    START_TIME = datetime.datetime.strptime(START_TIME, "%Y-%m-%d %H:%M:%S")
    T, H, W, C = output.shape
    # (10,8,16,1)-->(10,8,16)
    output = output.reshape((T, H, W))
    # 24,200,200,1
    # MAX = 1291 #2019-07-19 csv
    MAX = NORMAL_MAX
    # VALID_H = 32
    # VALID_W = 32
    VALID_H = 32
    VALID_W = 32
    data_list = []
    cnt = 0
    hole_cnt = 0
    for t in (range(T)):
        for h in range(VALID_H):
            for w in range(VALID_W):
                cnt += 1
                # Recover the absolute count; values below 1 are clamped to 0.
                count = output[t][h][w] * MAX
                count = round(count, 4)
                count = int(count)
                if count < 1.0: count = 0
                ori_x = w
                ori_y = h
                meshname = '{},{}'.format(ori_x, ori_y)
                # Each time index t corresponds to one 30-minute slot.
                TIME = (START_TIME + datetime.timedelta(minutes=30 * t)).strftime("%Y-%m-%d %H:%M:%S")
                # if MODE == 'HOLE':
                # if meshname in HOLE_MESH_LIST:
                data_list.append([TIME, meshname, count])
                # hole_cnt += 1
                # else:
                # data_list.append([TIME,meshname,count])
                # hole_cnt += 1
    if MODE == 'PM':
        assert cnt == T * VALID_H * VALID_W
    # if MODE == 'HOLE':
    # assert hole_cnt == T * len(HOLE_MESH_LIST)
    data = pd.DataFrame(data_list, columns=['datetime', 'meshname', 'count'])
    data.sort_values(by=['datetime', 'meshname'], inplace=True, ascending=[True, True])
    data.to_csv(csv_file, index=False)
def weights_init(model):
    """Initialize a module's weights by layer type.

    Conv2d layers get Xavier-normal initialization; Linear layers get
    Kaiming-normal (fan-in). Other module types are left untouched.
    Intended for use with ``model.apply(weights_init)``.
    """
    layer_name = type(model).__name__
    if 'Conv2d' in layer_name:
        nn.init.xavier_normal_(model.weight.data)
    elif 'Linear' in layer_name:
        nn.init.kaiming_normal_(model.weight.data, a=0, mode='fan_in')
def VALRMSE(input, target, ds, m_factor):
    """Denormalized RMSE between a prediction and its target.

    The raw RMSE is rescaled by half the min-max normalization range stored
    in ``ds.mmn`` and by the map factor *m_factor*.
    """
    scale = (ds.mmn.max - ds.mmn.min) / 2. * m_factor
    return torch.sqrt(F.mse_loss(input, target)) * scale
def VALMAPE(input, target, mmn, m_factor):
    """Mean absolute percentage error of prediction *input* w.r.t. *target*.

    Fixes: the relative error is now taken with respect to the ground truth
    ``target`` (the standard MAPE definition) instead of the prediction,
    which the previous code divided by.

    ``mmn`` and ``m_factor`` are unused but kept for interface symmetry
    with VALRMSE.
    """
    mape = torch.mean(torch.abs((target - input) / target))
    return mape
# Manual smoke test: build a zero batch and load the sample hole mask image.
if __name__ == '__main__':
    input = torch.zeros([64, 5, 1, 120, 160])
    hole, valid, single_valid = loadding_mask(input, '../data/hole_64.png')
    print(hole.shape, valid.shape, single_valid.shape)
| 26.786325 | 102 | 0.572112 |
3e471f5c918b5df126f63f8703dd30f17e8711be | 11,235 | py | Python | tensorflow/python/estimator/canned/linear.py | salonirk11/tensorflow | 7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7 | [
"Apache-2.0"
] | 5 | 2019-01-17T08:47:31.000Z | 2020-05-06T06:10:56.000Z | tensorflow/python/estimator/canned/linear.py | salonirk11/tensorflow | 7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/estimator/canned/linear.py | salonirk11/tensorflow | 7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7 | [
"Apache-2.0"
] | 3 | 2017-06-09T10:39:33.000Z | 2021-04-08T16:13:30.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import ftrl
from tensorflow.python.training import training_util
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
# Acts as an upper bound in _get_default_optimizer and as the rate passed to
# optimizers.get_optimizer_instance in _linear_model_fn.
_LEARNING_RATE = 0.2
def _get_default_optimizer(feature_columns):
  """Build the default FTRL optimizer for a linear model.

  The learning rate shrinks with the number of feature columns, capped at
  the historical default ``_LEARNING_RATE``.
  """
  scaled_rate = 1.0 / math.sqrt(len(feature_columns))
  return ftrl.FtrlOptimizer(learning_rate=min(_LEARNING_RATE, scaled_rate))
# TODO(b/36813849): Revisit passing params vs named arguments.
def _linear_model_fn(features, labels, mode, params, config):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: Dict of `Tensor`.
    labels: `Tensor` of shape `[batch_size, logits_dimension]`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use a FTRL optimizer.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If mode or params are invalid.
  """
  head = params['head']
  feature_columns = tuple(params['feature_columns'])
  # Missing optimizer falls back to FTRL with a rate scaled by column count.
  optimizer = optimizers.get_optimizer_instance(
      params.get('optimizer') or _get_default_optimizer(feature_columns),
      learning_rate=_LEARNING_RATE)
  num_ps_replicas = config.num_ps_replicas if config else 0
  # Partition large variables across parameter servers (64 MB minimum slice).
  partitioner = params.get('partitioner') or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))
  with variable_scope.variable_scope(
      'linear',
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):

    logits = feature_column_lib.linear_model(
        features=features,
        feature_columns=feature_columns,
        units=head.logits_dimension)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizer.minimize(
          loss,
          global_step=training_util.get_global_step())

    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
class LinearClassifier(estimator.Estimator):
  """Linear classifier model.

  Train a linear model to classify instances into one of multiple possible
  classes. When number of possible classes is 2, this is binary classification.

  Example:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  # Estimator using the default optimizer.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])

  # Or estimator using the FTRL optimizer with regularization.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    ...
  def input_fn_eval: # returns x, y (where y represents label's class index).
    ...
  estimator.train(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(input_fn=input_fn_predict)
  ```

  Input of `train` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_feature_key` is not `None`, a feature with
    `key=weight_feature_key` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_feature_key=None,
               optimizer=None,
               config=None,
               partitioner=None):
    """Construct a `LinearClassifier` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Note that class labels are integers representing the class index (i.e.
        values from 0 to n_classes-1). For arbitrary label values (e.g. string
        labels), convert to class indices first.
      weight_feature_key: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: The optimizer used to train the model. If specified, it should
        be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
        the Ftrl optimizer will be used.
      config: `RunConfig` object to configure the runtime settings.
      partitioner: Optional. Partitioner for input layer.

    Returns:
      A `LinearClassifier` estimator.

    Raises:
      ValueError: if n_classes < 2.
    """
    super(LinearClassifier, self).__init__(
        model_fn=_linear_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            # pylint: disable=protected-access
            # TODO(xiejw): Switch to the classifier head.
            # NOTE(review): a regression head is used here with
            # label_dimension=n_classes as a stopgap; confirm this is still
            # intended until the classifier head lands.
            'head': head_lib._regression_head_with_mean_squared_error_loss(
                label_dimension=n_classes,
                weight_feature_key=weight_feature_key),
            # pylint: enable=protected-access
            'feature_columns': feature_columns,
            'optimizer': optimizer,
            'partitioner': partitioner,
        })
class LinearRegressor(estimator.Estimator):
  """An estimator for TensorFlow Linear regression problems.

  Train a linear regression model to predict label value given observation of
  feature values.

  Example:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  estimator = LinearRegressor(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.train(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(input_fn=input_fn_predict)
  ```

  Input of `train` and `evaluate` should have following features,
  otherwise there will be a KeyError:

  * if `weight_feature_key` is not `None`:
    key=weight_feature_key, value=a `Tensor`
  * for column in `feature_columns`:
    - if isinstance(column, `SparseColumn`):
        key=column.name, value=a `SparseTensor`
    - if isinstance(column, `WeightedSparseColumn`):
        {key=id column name, value=a `SparseTensor`,
         key=weight column name, value=a `SparseTensor`}
    - if isinstance(column, `RealValuedColumn`):
        key=column.name, value=a `Tensor`
  """

  def __init__(self,
               feature_columns,
               model_dir=None,
               label_dimension=1,
               weight_feature_key=None,
               optimizer=None,
               config=None,
               partitioner=None):
    """Initializes a `LinearRegressor` instance.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_feature_key: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: string, `tf.Optimizer` object, or callable that returns
        `tf.Optimizer`. Defines the optimizer to use for training. If `None`,
        will use the FTRL optimizer.
      config: `RunConfig` object to configure the runtime settings.
      partitioner: Optional. Partitioner for input layer.
    """
    # Shares _linear_model_fn with LinearClassifier; only the head differs.
    super(LinearRegressor, self).__init__(
        model_fn=_linear_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            # pylint: disable=protected-access
            'head': head_lib._regression_head_with_mean_squared_error_loss(
                label_dimension=label_dimension,
                weight_feature_key=weight_feature_key),
            # pylint: enable=protected-access
            'feature_columns': feature_columns,
            'optimizer': optimizer,
            'partitioner': partitioner,
        })
| 38.214286 | 81 | 0.699599 |
662bf8fb067bfe0062dac23a3b425974a80d0f73 | 284 | py | Python | backend/src/crawling/google_maps/google_maps/pipelines.py | daniel6omez/ds4a-practicum | c80728091df98cd397be1f1fc94fd1fe2840c933 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | backend/src/crawling/google_maps/google_maps/pipelines.py | daniel6omez/ds4a-practicum | c80728091df98cd397be1f1fc94fd1fe2840c933 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | backend/src/crawling/google_maps/google_maps/pipelines.py | daniel6omez/ds4a-practicum | c80728091df98cd397be1f1fc94fd1fe2840c933 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class GoogleMapsPipeline:
    """Scrapy item pipeline for Google Maps items.

    Currently a pass-through placeholder: items are returned unchanged so
    downstream pipelines and exporters receive them as scraped.
    """

    def process_item(self, item, spider):
        """Return *item* untouched (no cleaning/validation implemented yet)."""
        return item
| 23.666667 | 66 | 0.714789 |
76a55b2720e5aef245c1e877a4ed65d41b400293 | 2,309 | py | Python | mocasin/tasks/calculate_platform_embedding.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | 1 | 2022-03-13T19:27:50.000Z | 2022-03-13T19:27:50.000Z | mocasin/tasks/calculate_platform_embedding.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | mocasin/tasks/calculate_platform_embedding.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | # Copyright (C) 2017 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Christian Menard, Andres Goens
import hydra
import logging
from mocasin.common.graph import DataflowGraph
import os
log = logging.getLogger(__name__)
def calculate_platform_embedding(cfg):
    """Calculate the embedding for a Platform Graph

    This task expects two hydra parameters to be available, for the platform and
    for the representation. The representation has to be an embedding
    representation (MetricSpaceEmbedding or SymmetryEmbedding).
    The options are taken from the metric space embedding representation.
    The file is written to the path defined in the configuration under:
    `platform.embedding_json`

    **Hydra Parameters**:
        * **platform:** the input platform. The task expects a configuration
          dict that can be instantiated to a
          :class:`~mocasin.common.platform.Platform` object.
        * **representation:** the mapping representation to find the embedding.
          This can be either MetricSpaceEmbedding or SymmetryEmbedding.
    """
    platform = hydra.utils.instantiate(cfg["platform"])
    json_file = cfg["platform"]["embedding_json"]
    # Remove any cached embedding so a stale file is never silently reused.
    if json_file is not None and os.path.exists(json_file):
        log.info("JSON file already found. Removing and recalculating")
        os.remove(json_file)
    elif json_file is None:
        log.warning(
            "No path specified for storing the file. Embedding won't be stored."
            "\n You can specify it with: platform.embedding_json "
            "= <output-file-path>"
        )
    # Only the two embedding representations carry the data this task dumps.
    if (
        cfg["representation"]._target_
        != "mocasin.representations.MetricEmbeddingRepresentation"
        and cfg["representation"]._target_
        != "mocasin.representations.SymmetryEmbedding"
    ):
        raise RuntimeError(
            "The calculate platform embedding task needs to be called "
            "w/ the MetricSpaceEmbedding or SymmetryEmbedding representation."
            f" Called with {cfg['representation']._target_}"
        )

    # An empty graph suffices: the embedding depends only on the platform.
    graph = DataflowGraph(name="EmptyGraph")
    representation = hydra.utils.instantiate(
        cfg["representation"], graph, platform
    )
    out_filename = str(cfg["out_file"])
    representation.dump_json(out_filename)
| 38.483333 | 80 | 0.694673 |
f8d2e2c85b9891228f7abe539dd7dcc249df24e6 | 8,141 | py | Python | pyscf/mcscf/test/test_mcao2mo.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-05-02T19:55:30.000Z | 2018-05-02T19:55:30.000Z | pyscf/mcscf/test/test_mcao2mo.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | null | null | null | pyscf/mcscf/test/test_mcao2mo.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-12-06T03:10:50.000Z | 2018-12-06T03:10:50.000Z | #!/usr/bin/env python
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
# Shared molecule object; geometry, basis, charge and spin are (re)assigned
# inside each test before mol.build() is called.
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
class KnowValues(unittest.TestCase):
    def test_rhf(self):
        """CASSCF AO->MO integral transforms (incore vs outcore levels) for RHF water.

        All _ERIS backends must agree with each other and with a reference
        built directly from the full MO-basis two-electron integrals.
        """
        mol.atom = [
            ['O', ( 0., 0.    , 0.   )],
            ['H', ( 0., -0.757, 0.587)],
            ['H', ( 0., 0.757 , 0.587)],]
        mol.basis = 'cc-pvtz'
        mol.build()
        m = scf.RHF(mol)
        ehf = m.scf()
        mc = mcscf.CASSCF(m, 6, 4)
        mc.verbose = 5
        mo = m.mo_coeff

        # Same transformation through four different code paths.
        eris0 = mcscf.mc_ao2mo._ERIS(mc, mo, 'incore')
        eris1 = mcscf.mc_ao2mo._ERIS(mc, mo, 'outcore')
        eris2 = mcscf.mc_ao2mo._ERIS(mc, mo, 'outcore', level=1)
        eris3 = mcscf.mc_ao2mo._ERIS(mc, mo, 'outcore', level=2)
        self.assertTrue(numpy.allclose(eris0.vhf_c, eris1.vhf_c))
        self.assertTrue(numpy.allclose(eris0.j_pc , eris1.j_pc ))
        self.assertTrue(numpy.allclose(eris0.k_pc , eris1.k_pc ))
        self.assertTrue(numpy.allclose(eris0.ppaa , eris1.ppaa ))
        self.assertTrue(numpy.allclose(eris0.papa , eris1.papa ))

        self.assertTrue(numpy.allclose(eris0.vhf_c, eris2.vhf_c))
        self.assertTrue(numpy.allclose(eris0.j_pc , eris2.j_pc ))
        self.assertTrue(numpy.allclose(eris0.k_pc , eris2.k_pc ))
        self.assertTrue(numpy.allclose(eris0.ppaa , eris2.ppaa ))
        self.assertTrue(numpy.allclose(eris0.papa , eris2.papa ))

        self.assertTrue(numpy.allclose(eris0.vhf_c, eris3.vhf_c))
        self.assertTrue(numpy.allclose(eris0.ppaa , eris3.ppaa ))
        self.assertTrue(numpy.allclose(eris0.papa , eris3.papa ))

        # Reference: contract the full MO-basis ERIs by hand.
        ncore = mc.ncore
        ncas = mc.ncas
        nocc = ncore + ncas
        nmo = mo.shape[1]
        eri = ao2mo.incore.full(m._eri, mo, compact=False).reshape((nmo,)*4)
        aaap = numpy.array(eri[ncore:nocc,ncore:nocc,ncore:nocc,:])
        jc_pp = numpy.einsum('iipq->ipq', eri[:ncore,:ncore,:,:])
        kc_pp = numpy.einsum('ipqi->ipq', eri[:ncore,:,:,:ncore])
        vhf_c = numpy.einsum('cij->ij', jc_pp)*2 - numpy.einsum('cij->ij', kc_pp)
        j_pc = numpy.einsum('ijj->ji', jc_pp)
        k_pc = numpy.einsum('ijj->ji', kc_pp)
        ppaa = numpy.array(eri[:,:,ncore:nocc,ncore:nocc])
        papa = numpy.array(eri[:,ncore:nocc,:,ncore:nocc])
        self.assertTrue(numpy.allclose(vhf_c, eris0.vhf_c))
        self.assertTrue(numpy.allclose(j_pc , eris0.j_pc ))
        self.assertTrue(numpy.allclose(k_pc , eris0.k_pc ))
        self.assertTrue(numpy.allclose(ppaa , eris0.ppaa ))
        self.assertTrue(numpy.allclose(papa , eris0.papa ))
    def test_uhf(self):
        """Validate the UHF-CASSCF integral transformation (_ERIS).

        Two layers of checks:
          1. the 'incore' and 'outcore' transformation paths must produce
             identical integral blocks;
          2. every stored block must match the same quantity rebuilt by hand
             (einsum/slicing) from the full MO-basis two-electron integrals.

        In the _ERIS attribute names, lowercase indices refer to the alpha
        orbital space and uppercase to the beta space (see the reference
        construction below, where alpha blocks come from eriaa and beta
        blocks from eribb).
        """
        # Open-shell system: water cation (charge=1, spin=1) so the alpha
        # and beta orbital spaces genuinely differ.
        mol.atom = [
            ['O', ( 0., 0.    , 0.   )],
            ['H', ( 0., -0.757, 0.587)],
            ['H', ( 0., 0.757 , 0.587)],]
        mol.basis = 'cc-pvtz'
        mol.charge = 1
        mol.spin = 1
        mol.build()
        m = scf.UHF(mol)
        ehf = m.scf()
        # CASSCF with 4 active orbitals / 4 active electrons on the UHF reference.
        mc = mcscf.mc1step_uhf.CASSCF(m, 4, 4)
        mc.verbose = 5
        mo = m.mo_coeff
        # Same transformation via the in-memory and the disk-based code paths.
        eris0 = mcscf.mc_ao2mo_uhf._ERIS(mc, mo, 'incore')
        eris1 = mcscf.mc_ao2mo_uhf._ERIS(mc, mo, 'outcore')
        # incore vs outcore: every block must agree.
        self.assertTrue(numpy.allclose(eris1.jkcpp, eris0.jkcpp))
        self.assertTrue(numpy.allclose(eris1.jkcPP, eris0.jkcPP))
        self.assertTrue(numpy.allclose(eris1.jC_pp, eris0.jC_pp))
        self.assertTrue(numpy.allclose(eris1.jc_PP, eris0.jc_PP))
        self.assertTrue(numpy.allclose(eris1.aapp , eris0.aapp ))
        self.assertTrue(numpy.allclose(eris1.aaPP , eris0.aaPP ))
        self.assertTrue(numpy.allclose(eris1.AApp , eris0.AApp ))
        self.assertTrue(numpy.allclose(eris1.AAPP , eris0.AAPP ))
        self.assertTrue(numpy.allclose(eris1.appa , eris0.appa ))
        self.assertTrue(numpy.allclose(eris1.apPA , eris0.apPA ))
        self.assertTrue(numpy.allclose(eris1.APPA , eris0.APPA ))
        self.assertTrue(numpy.allclose(eris1.cvCV , eris0.cvCV ))
        self.assertTrue(numpy.allclose(eris1.Icvcv, eris0.Icvcv))
        self.assertTrue(numpy.allclose(eris1.ICVCV, eris0.ICVCV))
        self.assertTrue(numpy.allclose(eris1.Iapcv, eris0.Iapcv))
        self.assertTrue(numpy.allclose(eris1.IAPCV, eris0.IAPCV))
        self.assertTrue(numpy.allclose(eris1.apCV , eris0.apCV ))
        self.assertTrue(numpy.allclose(eris1.APcv , eris0.APcv ))
        # --- Brute-force reference: transform the full ERI tensor for the
        # alpha-alpha, alpha-beta and beta-beta spin blocks, then slice out
        # each quantity _ERIS stores.  ncore/nocc are (alpha, beta) pairs.
        nmo = mo[0].shape[1]
        ncore = mc.ncore
        ncas = mc.ncas
        nocc = (ncas + ncore[0], ncas + ncore[1])
        eriaa = ao2mo.incore.full(mc._scf._eri, mo[0])
        eriab = ao2mo.incore.general(mc._scf._eri, (mo[0],mo[0],mo[1],mo[1]))
        eribb = ao2mo.incore.full(mc._scf._eri, mo[1])
        # Expand the packed integrals back to full 4-index (nmo,)*4 arrays.
        eriaa = ao2mo.restore(1, eriaa, nmo)
        eriab = ao2mo.restore(1, eriab, nmo)
        eribb = ao2mo.restore(1, eribb, nmo)
        # Core Coulomb-minus-exchange contributions for each spin.
        jkcpp = numpy.einsum('iipq->ipq', eriaa[:ncore[0],:ncore[0],:,:]) \
              - numpy.einsum('ipqi->ipq', eriaa[:ncore[0],:,:,:ncore[0]])
        jkcPP = numpy.einsum('iipq->ipq', eribb[:ncore[1],:ncore[1],:,:]) \
              - numpy.einsum('ipqi->ipq', eribb[:ncore[1],:,:,:ncore[1]])
        # Cross-spin core Coulomb terms (no exchange between opposite spins).
        jC_pp = numpy.einsum('pqii->pq', eriab[:,:,:ncore[1],:ncore[1]])
        jc_PP = numpy.einsum('iipq->pq', eriab[:ncore[0],:ncore[0],:,:])
        # Active-active / active-core blocks, per spin combination.
        aapp = numpy.copy(eriaa[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
        aaPP = numpy.copy(eriab[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
        AApp = numpy.copy(eriab[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].transpose(2,3,0,1))
        AAPP = numpy.copy(eribb[ncore[1]:nocc[1],ncore[1]:nocc[1],:,:])
        appa = numpy.copy(eriaa[ncore[0]:nocc[0],:,:,ncore[0]:nocc[0]])
        apPA = numpy.copy(eriab[ncore[0]:nocc[0],:,:,ncore[1]:nocc[1]])
        APPA = numpy.copy(eribb[ncore[1]:nocc[1],:,:,ncore[1]:nocc[1]])
        # Core-virtual cross-spin block and the "I" (2J-K-K style) combinations.
        cvCV = numpy.copy(eriab[:ncore[0],ncore[0]:,:ncore[1],ncore[1]:])
        Icvcv = eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:] * 2\
              - eriaa[:ncore[0],:ncore[0],ncore[0]:,ncore[0]:].transpose(0,3,1,2) \
              - eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:].transpose(0,3,2,1)
        ICVCV = eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:] * 2\
              - eribb[:ncore[1],:ncore[1],ncore[1]:,ncore[1]:].transpose(0,3,1,2) \
              - eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:].transpose(0,3,2,1)
        Iapcv = eriaa[ncore[0]:nocc[0],:,:ncore[0],ncore[0]:] * 2 \
              - eriaa[:,ncore[0]:,:ncore[0],ncore[0]:nocc[0]].transpose(3,0,2,1) \
              - eriaa[:,:ncore[0],ncore[0]:,ncore[0]:nocc[0]].transpose(3,0,1,2)
        IAPCV = eribb[ncore[1]:nocc[1],:,:ncore[1],ncore[1]:] * 2 \
              - eribb[:,ncore[1]:,:ncore[1],ncore[1]:nocc[1]].transpose(3,0,2,1) \
              - eribb[:,:ncore[1],ncore[1]:,ncore[1]:nocc[1]].transpose(3,0,1,2)
        apCV = numpy.copy(eriab[ncore[0]:nocc[0],:,:ncore[1],ncore[1]:])
        APcv = numpy.copy(eriab[:ncore[0],ncore[0]:,ncore[1]:nocc[1],:].transpose(2,3,0,1))
        # Each reference block must match the incore _ERIS result.
        self.assertTrue(numpy.allclose(jkcpp, eris0.jkcpp))
        self.assertTrue(numpy.allclose(jkcPP, eris0.jkcPP))
        self.assertTrue(numpy.allclose(jC_pp, eris0.jC_pp))
        self.assertTrue(numpy.allclose(jc_PP, eris0.jc_PP))
        self.assertTrue(numpy.allclose(aapp , eris0.aapp ))
        self.assertTrue(numpy.allclose(aaPP , eris0.aaPP ))
        self.assertTrue(numpy.allclose(AApp , eris0.AApp ))
        self.assertTrue(numpy.allclose(AAPP , eris0.AAPP ))
        self.assertTrue(numpy.allclose(appa , eris0.appa ))
        self.assertTrue(numpy.allclose(apPA , eris0.apPA ))
        self.assertTrue(numpy.allclose(APPA , eris0.APPA ))
        self.assertTrue(numpy.allclose(cvCV , eris0.cvCV ))
        self.assertTrue(numpy.allclose(Icvcv, eris0.Icvcv))
        self.assertTrue(numpy.allclose(ICVCV, eris0.ICVCV))
        self.assertTrue(numpy.allclose(Iapcv, eris0.Iapcv))
        self.assertTrue(numpy.allclose(IAPCV, eris0.IAPCV))
        self.assertTrue(numpy.allclose(apCV , eris0.apCV ))
        self.assertTrue(numpy.allclose(APcv , eris0.APcv ))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    print("Full Tests for mc_ao2mo")
    unittest.main()
| 47.331395 | 91 | 0.594767 |
73a62a5b10f6e82f4a2b8463aa1c2d6a541c823c | 1,264 | py | Python | saas/backend/apps/policy/migrations/0004_auto_20200402_1134.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 7 | 2021-08-13T03:48:16.000Z | 2021-12-20T15:31:38.000Z | saas/backend/apps/policy/migrations/0004_auto_20200402_1134.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 456 | 2021-08-16T02:13:57.000Z | 2022-03-30T10:02:49.000Z | saas/backend/apps/policy/migrations/0004_auto_20200402_1134.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 17 | 2021-08-10T04:08:46.000Z | 2022-03-14T14:24:36.000Z | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 2.2.10 on 2020-04-02 03:34
from django.db import migrations
class Migration(migrations.Migration):
    """Replace the composite indexes on the ``attachpolicy`` and ``policy`` models.

    Each ``AlterIndexTogether`` resets the model's ``index_together`` option to
    a single multi-column index over subject/system columns (plus
    ``action_type`` for ``attachpolicy``).
    """

    dependencies = [
        ('policy', '0003_auto_20200313_1628'),
    ]

    operations = [
        migrations.AlterIndexTogether(
            name='attachpolicy',
            index_together={('subject_id', 'subject_type', 'system_id', 'action_type')},
        ),
        migrations.AlterIndexTogether(
            name='policy',
            index_together={('subject_id', 'subject_type', 'system_id')},
        ),
    ]
| 39.5 | 115 | 0.706487 |
5bf652c206a5fa93c81bdd05b2551bc1377fa361 | 12,618 | py | Python | pandas/tests/libs/test_hashtable.py | rajat315315/pandas | 2eec4f7cfa1c45671b9875062343521a53ae8b28 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/libs/test_hashtable.py | rajat315315/pandas | 2eec4f7cfa1c45671b9875062343521a53ae8b28 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/libs/test_hashtable.py | rajat315315/pandas | 2eec4f7cfa1c45671b9875062343521a53ae8b28 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from contextlib import contextmanager
import tracemalloc
import numpy as np
import pytest
from pandas._libs import hashtable as ht
import pandas._testing as tm
@contextmanager
def activated_tracemalloc():
    """Run the managed block with tracemalloc tracing enabled.

    Tracing is always stopped on exit, even if the block raises.
    """
    tracemalloc.start()
    try:
        yield
    finally:
        tracemalloc.stop()
def get_allocated_khash_memory():
    """Total bytes currently allocated in the khash tracemalloc domain."""
    domain_filter = tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain())
    snapshot = tracemalloc.take_snapshot().filter_traces((domain_filter,))
    return sum(trace.size for trace in snapshot.traces)
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.PyObjectHashTable, np.object_),
        (ht.Complex128HashTable, np.complex128),
        (ht.Int64HashTable, np.int64),
        (ht.UInt64HashTable, np.uint64),
        (ht.Float64HashTable, np.float64),
        (ht.Complex64HashTable, np.complex64),
        (ht.Int32HashTable, np.int32),
        (ht.UInt32HashTable, np.uint32),
        (ht.Float32HashTable, np.float32),
        (ht.Int16HashTable, np.int16),
        (ht.UInt16HashTable, np.uint16),
        (ht.Int8HashTable, np.int8),
        (ht.UInt8HashTable, np.uint8),
    ],
)
class TestHashTable:
    """Behavioral checks run against every dtype-specialized hash table.

    ``writable`` is an extra parameter injected by pytest (presumably a
    boolean fixture toggling ndarray write-ability -- confirm in conftest);
    it is only ever assigned to ``arr.flags.writeable``.
    """

    def test_get_set_contains_len(self, table_type, dtype):
        """set_item/get_item/__contains__/__len__ round-trip, incl. overwrite."""
        index = 5
        table = table_type(55)
        assert len(table) == 0
        assert index not in table
        table.set_item(index, 42)
        assert len(table) == 1
        assert index in table
        assert table.get_item(index) == 42
        table.set_item(index + 1, 41)
        assert index in table
        assert index + 1 in table
        assert len(table) == 2
        assert table.get_item(index) == 42
        assert table.get_item(index + 1) == 41
        # Overwriting an existing key must not change the length.
        table.set_item(index, 21)
        assert index in table
        assert index + 1 in table
        assert len(table) == 2
        assert table.get_item(index) == 21
        assert table.get_item(index + 1) == 41
        # Missing keys raise KeyError whose message contains the key.
        assert index + 2 not in table
        with pytest.raises(KeyError, match=str(index + 2)):
            table.get_item(index + 2)

    def test_map(self, table_type, dtype, writable):
        """map() inserts parallel key/value arrays."""
        # PyObjectHashTable has no map-method
        if table_type != ht.PyObjectHashTable:
            N = 77
            table = table_type()
            keys = np.arange(N).astype(dtype)
            vals = np.arange(N).astype(np.int64) + N
            keys.flags.writeable = writable
            vals.flags.writeable = writable
            table.map(keys, vals)
            for i in range(N):
                assert table.get_item(keys[i]) == i + N

    def test_map_locations(self, table_type, dtype, writable):
        """map_locations() maps each key to its position in the input array."""
        N = 8
        table = table_type()
        keys = (np.arange(N) + N).astype(dtype)
        keys.flags.writeable = writable
        table.map_locations(keys)
        for i in range(N):
            assert table.get_item(keys[i]) == i

    def test_lookup(self, table_type, dtype, writable):
        """lookup() returns the stored location for every known key."""
        N = 3
        table = table_type()
        keys = (np.arange(N) + N).astype(dtype)
        keys.flags.writeable = writable
        table.map_locations(keys)
        result = table.lookup(keys)
        expected = np.arange(N)
        tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))

    def test_lookup_wrong(self, table_type, dtype):
        """lookup() yields -1 for keys that were never inserted."""
        # Smaller N for 8-bit dtypes so the shifted keys do not overflow.
        if dtype in (np.int8, np.uint8):
            N = 100
        else:
            N = 512
        table = table_type()
        keys = (np.arange(N) + N).astype(dtype)
        table.map_locations(keys)
        wrong_keys = np.arange(N).astype(dtype)
        result = table.lookup(wrong_keys)
        assert np.all(result == -1)

    def test_unique(self, table_type, dtype, writable):
        """unique() deduplicates while preserving first-seen order."""
        if dtype in (np.int8, np.uint8):
            N = 88
        else:
            N = 1000
        table = table_type()
        expected = (np.arange(N) + N).astype(dtype)
        keys = np.repeat(expected, 5)
        keys.flags.writeable = writable
        unique = table.unique(keys)
        tm.assert_numpy_array_equal(unique, expected)

    def test_tracemalloc_works(self, table_type, dtype):
        """tracemalloc accounting matches the table's sizeof(), and drops
        back to zero after the table is deleted."""
        if dtype in (np.int8, np.uint8):
            N = 256
        else:
            N = 30000
        keys = np.arange(N).astype(dtype)
        with activated_tracemalloc():
            table = table_type()
            table.map_locations(keys)
            used = get_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert get_allocated_khash_memory() == 0

    def test_tracemalloc_for_empty(self, table_type, dtype):
        """Even an empty table's allocation is fully tracked and released."""
        with activated_tracemalloc():
            table = table_type()
            used = get_allocated_khash_memory()
            my_size = table.sizeof()
            assert used == my_size
            del table
            assert get_allocated_khash_memory() == 0
def test_get_labels_groupby_for_Int64(writable):
table = ht.Int64HashTable()
vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
vals.flags.writeable = writable
arr, unique = table.get_labels_groupby(vals)
expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.int64)
expected_unique = np.array([1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(arr.astype(np.int64), expected_arr)
tm.assert_numpy_array_equal(unique, expected_unique)
def test_tracemalloc_works_for_StringHashTable():
    """tracemalloc accounting must match StringHashTable.sizeof() exactly,
    and report zero khash memory once the table is deleted.
    """
    N = 1000
    # ``np.compat.unicode`` (an alias of ``str``) was removed in NumPy 2.0;
    # use ``str`` directly for the same int -> unicode -> object conversion.
    keys = np.arange(N).astype(str).astype(np.object_)
    with activated_tracemalloc():
        table = ht.StringHashTable()
        table.map_locations(keys)
        used = get_allocated_khash_memory()
        my_size = table.sizeof()
        assert used == my_size
        del table
        assert get_allocated_khash_memory() == 0
def test_tracemalloc_for_empty_StringHashTable():
    """Even an empty StringHashTable's allocation is tracked and released."""
    with activated_tracemalloc():
        table = ht.StringHashTable()
        assert get_allocated_khash_memory() == table.sizeof()
        del table
        assert get_allocated_khash_memory() == 0
@pytest.mark.parametrize(
    "table_type, dtype",
    [
        (ht.Float64HashTable, np.float64),
        (ht.Float32HashTable, np.float32),
        (ht.Complex128HashTable, np.complex128),
        (ht.Complex64HashTable, np.complex64),
    ],
)
class TestHashTableWithNans:
    """NaN handling for the float/complex tables: all NaNs hash to one slot."""

    def test_get_set_contains_len(self, table_type, dtype):
        """A NaN key behaves like any other single key (incl. overwrite)."""
        index = float("nan")
        table = table_type()
        assert index not in table
        table.set_item(index, 42)
        assert len(table) == 1
        assert index in table
        assert table.get_item(index) == 42
        table.set_item(index, 41)
        assert len(table) == 1
        assert index in table
        assert table.get_item(index) == 41

    def test_map(self, table_type, dtype):
        """All-NaN keys collapse to one entry; the last value wins."""
        N = 332
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        vals = (np.arange(N) + N).astype(np.int64)
        table.map(keys, vals)
        assert len(table) == 1
        # Last written value is vals[N-1] == 2*N - 1.
        assert table.get_item(np.nan) == 2 * N - 1

    def test_map_locations(self, table_type, dtype):
        """All-NaN keys collapse to one entry mapped to the last location."""
        N = 10
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        table.map_locations(keys)
        assert len(table) == 1
        assert table.get_item(np.nan) == N - 1

    def test_unique(self, table_type, dtype):
        """unique() of an all-NaN array is a single NaN."""
        N = 1020
        table = table_type()
        keys = np.full(N, np.nan, dtype=dtype)
        unique = table.unique(keys)
        assert np.all(np.isnan(unique)) and len(unique) == 1
def get_ht_function(fun_name, type_suffix):
return getattr(ht, fun_name + "_" + type_suffix)
@pytest.mark.parametrize(
    "dtype, type_suffix",
    [
        (np.object_, "object"),
        (np.complex128, "complex128"),
        (np.int64, "int64"),
        (np.uint64, "uint64"),
        (np.float64, "float64"),
        (np.complex64, "complex64"),
        (np.int32, "int32"),
        (np.uint32, "uint32"),
        (np.float32, "float32"),
        (np.int16, "int16"),
        (np.uint16, "uint16"),
        (np.int8, "int8"),
        (np.uint8, "uint8"),
    ],
)
class TestHelpFunctions:
    """Checks for the free dtype-specialized helpers (value_count, duplicated,
    ismember, mode), looked up via get_ht_function.
    """

    def test_value_count(self, dtype, type_suffix, writable):
        """value_count returns each distinct key with its occurrence count."""
        N = 43
        value_count = get_ht_function("value_count", type_suffix)
        expected = (np.arange(N) + N).astype(dtype)
        values = np.repeat(expected, 5)
        values.flags.writeable = writable
        keys, counts = value_count(values, False)
        # Key order is not guaranteed here, so sort before comparing.
        tm.assert_numpy_array_equal(np.sort(keys), expected)
        assert np.all(counts == 5)

    def test_value_count_stable(self, dtype, type_suffix, writable):
        """Keys come back in first-seen order when all counts are 1."""
        # GH12679
        value_count = get_ht_function("value_count", type_suffix)
        values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
        values.flags.writeable = writable
        keys, counts = value_count(values, False)
        tm.assert_numpy_array_equal(keys, values)
        assert np.all(counts == 1)

    def test_duplicated_first(self, dtype, type_suffix, writable):
        """duplicated marks every occurrence except the first of each value."""
        N = 100
        duplicated = get_ht_function("duplicated", type_suffix)
        values = np.repeat(np.arange(N).astype(dtype), 5)
        values.flags.writeable = writable
        result = duplicated(values)
        expected = np.ones_like(values, dtype=np.bool_)
        # Every 5th element is the first occurrence of its value.
        expected[::5] = False
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_yes(self, dtype, type_suffix, writable):
        """ismember is all-True when every element is in the lookup set."""
        N = 127
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.arange(N).astype(dtype)
        values = np.arange(N).astype(dtype)
        arr.flags.writeable = writable
        values.flags.writeable = writable
        result = ismember(arr, values)
        expected = np.ones_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_no(self, dtype, type_suffix):
        """ismember is all-False for disjoint arrays."""
        N = 17
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.arange(N).astype(dtype)
        values = (np.arange(N) + N).astype(dtype)
        result = ismember(arr, values)
        expected = np.zeros_like(values, dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_mode(self, dtype, type_suffix, writable):
        """mode returns the most frequent value."""
        # Smaller N for 8-bit dtypes to stay within the value range.
        if dtype in (np.int8, np.uint8):
            N = 53
        else:
            N = 11111
        mode = get_ht_function("mode", type_suffix)
        values = np.repeat(np.arange(N).astype(dtype), 5)
        # Overwrite one element so 42 occurs 6 times: a unique mode.
        values[0] = 42
        values.flags.writeable = writable
        result = mode(values, False)
        assert result == 42
@pytest.mark.parametrize(
    "dtype, type_suffix",
    [
        (np.float64, "float64"),
        (np.float32, "float32"),
        (np.complex128, "complex128"),
        (np.complex64, "complex64"),
    ],
)
class TestHelpFunctionsWithNans:
    """NaN semantics of the float/complex helper functions.

    The second (boolean) argument of these helpers controls NaN dropping:
    when True, NaN entries are excluded from the result.
    """

    def test_value_count(self, dtype, type_suffix):
        """NaNs are dropped with dropna=True, grouped into one key otherwise."""
        value_count = get_ht_function("value_count", type_suffix)
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        keys, counts = value_count(values, True)
        assert len(keys) == 0
        keys, counts = value_count(values, False)
        assert len(keys) == 1 and np.all(np.isnan(keys))
        assert counts[0] == 3

    def test_duplicated_first(self, dtype, type_suffix):
        """All NaNs count as the same value: only the first is non-duplicate."""
        duplicated = get_ht_function("duplicated", type_suffix)
        values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        result = duplicated(values)
        expected = np.array([False, True, True])
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_yes(self, dtype, type_suffix):
        """NaN elements match NaN members."""
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        values = np.array([np.nan, np.nan], dtype=dtype)
        result = ismember(arr, values)
        expected = np.array([True, True, True], dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_ismember_no(self, dtype, type_suffix):
        """NaN elements do not match non-NaN members."""
        ismember = get_ht_function("ismember", type_suffix)
        arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
        values = np.array([1], dtype=dtype)
        result = ismember(arr, values)
        expected = np.array([False, False, False], dtype=np.bool_)
        tm.assert_numpy_array_equal(result, expected)

    def test_mode(self, dtype, type_suffix):
        """mode ignores NaNs with dropna=True and can itself be NaN otherwise."""
        mode = get_ht_function("mode", type_suffix)
        values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
        assert mode(values, True) == 42
        assert np.isnan(mode(values, False))
| 33.737968 | 87 | 0.610794 |
73251fc1abebe7c479b24d7649f559b762d04840 | 2,572 | py | Python | python/pyspark/pandas/spark/functions.py | akhalymon-cv/spark | 76191b9151b6a7804f8894e53eef74106f98b787 | [
"Apache-2.0"
] | 9 | 2015-03-30T02:54:31.000Z | 2020-02-23T03:28:39.000Z | python/pyspark/pandas/spark/functions.py | akhalymon-cv/spark | 76191b9151b6a7804f8894e53eef74106f98b787 | [
"Apache-2.0"
] | 39 | 2018-09-25T21:57:04.000Z | 2022-03-27T17:34:43.000Z | python/pyspark/pandas/spark/functions.py | akhalymon-cv/spark | 76191b9151b6a7804f8894e53eef74106f98b787 | [
"Apache-2.0"
] | 10 | 2016-10-12T09:04:47.000Z | 2021-04-29T19:07:20.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Additional Spark functions used in pandas-on-Spark.
"""
from typing import Any, Union, no_type_check
import numpy as np
from pyspark import SparkContext
from pyspark.sql import functions as F
from pyspark.sql.column import (
Column,
_to_java_column,
_create_column_from_literal,
)
from pyspark.sql.types import (
ByteType,
FloatType,
IntegerType,
LongType,
)
def repeat(col: Column, n: Union[int, Column]) -> Column:
    """
    Repeats a string column n times, and returns it as a new string column.
    """
    sc = SparkContext._active_spark_context  # type: ignore[attr-defined]
    # ``n`` may arrive as a plain int or as a Column expression.
    if isinstance(n, Column):
        times = _to_java_column(n)
    else:
        times = _create_column_from_literal(n)
    return _call_udf(sc, "repeat", _to_java_column(col), times)
def lit(literal: Any) -> Column:
    """
    Creates a Column of literal value.
    """
    # Anything that is not a numpy scalar goes straight through to Spark.
    if not isinstance(literal, np.generic):
        return F.lit(literal)
    # Unbox the numpy scalar, then pin the Spark type where a mapping exists.
    scol = F.lit(literal.item())
    if isinstance(literal, np.int64):
        return scol.astype(LongType())
    if isinstance(literal, np.int32):
        return scol.astype(IntegerType())
    if isinstance(literal, (np.int8, np.byte)):
        return scol.astype(ByteType())
    if isinstance(literal, np.float32):
        return scol.astype(FloatType())
    # TODO: Complete mappings between numpy literals and Spark data types
    return scol
@no_type_check
def _call_udf(sc, name, *cols):
    """Invoke a registered JVM UDF ``name`` over the given Java columns."""
    jcols = _make_arguments(sc, *cols)
    return Column(sc._jvm.functions.callUDF(name, jcols))
@no_type_check
def _make_arguments(sc, *cols):
    """Pack ``cols`` into a Java ``Column[]`` array via the py4j gateway."""
    arr = sc._gateway.new_array(sc._jvm.Column, len(cols))
    for index, column in enumerate(cols):
        arr[index] = column
    return arr
| 32.556962 | 87 | 0.703344 |
5e759ad1c59c2b6928f2ac955f17410ec56f6fca | 4,048 | py | Python | src/quadtree.py | myumoon/py_quadtree | 3ecf44de650a3d55094dd858da293e2e80e913c0 | [
"MIT"
] | null | null | null | src/quadtree.py | myumoon/py_quadtree | 3ecf44de650a3d55094dd858da293e2e80e913c0 | [
"MIT"
] | null | null | null | src/quadtree.py | myumoon/py_quadtree | 3ecf44de650a3d55094dd858da293e2e80e913c0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import math
class QuadTree(object):
    """Linear quadtree over an axis-aligned 2D rectangle.

    Circle-like objects are registered into the smallest tree cell that fully
    contains them; cells of every layer live in one flat list (``linerTree``)
    addressed by layer offset + Morton (Z-order) index.
    """

    def __init__(self, level, rectBeginXY, rectEndXY):
        """Build an empty tree.

        @param level       subdivision depth (the finest layer splits each
                           axis into 2**level cells)
        @param rectBeginXY minimum corner of the covered rectangle
        @param rectEndXY   maximum corner of the covered rectangle
        """
        self.__level = level
        self.__begin = rectBeginXY
        self.__end = rectEndXY
        assert self.__begin.x < self.__end.x
        assert self.__begin.y < self.__end.y
        self.__size = math.Vec2(self.__end - self.__begin)
        self.clear()

    def add(self, obj):
        """Register an object (needs ``.pos`` and ``.r``) into its cell.

        Objects outside the covered rectangle are silently ignored.
        """
        idx = self.getIndexOf(obj)
        if 0 <= idx:
            self.linerTree[idx].append(obj)

    def clear(self):
        """Reset the tree to empty cells.

        A full linear quadtree of depth L needs (4**(L+1) - 1) / 3 cells
        (all layers, root included).  The previous allocation of 4**L
        entries only covered the finest layer, so indices returned by
        getIndexOf() for coarser cells could fall past the end of the list.
        """
        self.linerTree = [[] for i in range((4 ** (self.__level + 1) - 1) // 3)]

    def traverse(self, traverser):
        """Walk the tree with ``traverser`` (not implemented yet)."""
        pass

    def getIndexOf(self, sphere):
        """Return the linear-tree index of the smallest cell fully containing
        ``sphere``, or -1 when the sphere sticks out of the covered rectangle.
        """
        sphereMin = math.Vec2(sphere.pos.x - sphere.r, sphere.pos.y - sphere.r)
        sphereMax = math.Vec2(sphere.pos.x + sphere.r, sphere.pos.y + sphere.r)
        # Finest-layer cell coordinates of the sphere's bounding corners.
        minIdx = math.Vec2(
            self.__getAxisIndex(sphereMin.x, self.__begin.x, self.__end.x),
            self.__getAxisIndex(sphereMin.y, self.__begin.y, self.__end.y)
        )
        maxIdx = math.Vec2(
            self.__getAxisIndex(sphereMax.x, self.__begin.x, self.__end.x),
            self.__getAxisIndex(sphereMax.y, self.__begin.y, self.__end.y)
        )
        if minIdx.x < 0 or minIdx.y < 0 or maxIdx.x < 0 or maxIdx.y < 0:
            return -1
        minMortonIndex = self.__getMortonIndex(minIdx.x, minIdx.y)
        maxMortonIndex = self.__getMortonIndex(maxIdx.x, maxIdx.y)
        commonLevel = self.__getCommonLevel(minMortonIndex, maxMortonIndex)
        if 0 < commonLevel:
            # Morton index of the shared cell at its own layer.
            commonMortonIndex = minMortonIndex >> ((self.__level - commonLevel) * 2)
            # Offset of that layer in the flat list: 1 + 4 + ... +
            # 4**(commonLevel-1).  ``//`` keeps it an int on Python 3 too
            # (4**k - 1 is always divisible by 3, so the value is unchanged).
            # todo: could be precomputed per level.
            offset = (4 ** commonLevel - 1) // 3
            return offset + commonMortonIndex
        # The two corners only share the root cell.
        return 0

    def __separateBit(self, n):
        """Spread the low bits of ``n`` so one zero bit sits between each
        (half of the Morton interleaving)."""
        result = 0
        for i in reversed(range(0, self.__level + 1)):
            result |= (n & (1 << i)) << i
        return result

    def __getMortonIndex(self, idxX, idxY):
        """Interleave x/y cell coordinates into a Morton (Z-order) index."""
        return self.__separateBit(idxX) | self.__separateBit(idxY) << 1

    def __getCommonLevel(self, idx0, idx1):
        """Depth of the deepest cell shared by two finest-layer Morton indices."""
        if self.__level == 0:
            return 0
        xor = idx0 ^ idx1
        # Each differing bit pair pushes the common ancestor one layer up.
        level = self.__level
        while 0 < xor:
            xor = xor >> 2
            level -= 1
        return level

    def __getAxisIndex(self, pos, begin, end):
        """Finest-layer cell coordinate of ``pos`` along one axis,
        or -1 when ``pos`` lies outside [begin, end]."""
        if pos < begin or end < pos:
            return -1
        width = end - begin
        return int((pos - begin) / (float(width) / (2 ** self.__level)))
# test
# Ad-hoc manual smoke test left by the author; most experiments are
# commented out.  NOTE(review): ``math.Vec2/Vec3/Sphere`` imply ``math`` is
# a project-local module shadowing the stdlib -- confirm it exists; also
# Vec3 arguments are passed to the 2D QuadTree below -- confirm intended.
#sphere1 = math.Sphere(math.Vec3(200, 300, 400), 50.0)
#sphere2 = math.Sphere(math.Vec3(300, 400, 500), 20.0)
sphere1 = math.Sphere(math.Vec3(0, 0, 0), 0.5)
sphere2 = math.Sphere(math.Vec3(1, 0, 0), 0.5)
#camera = math.Sphere(math.Vec3(210, 310, 410), 1.0)
octtree = QuadTree(3, math.Vec3(0, 0, 0), math.Vec3(4, 4, 4))
#octtree.add(sphere1)
#octtree.add(sphere2)
#print octtree.getIndexOf(sphere1)
#print octtree.getIndexOf(sphere2)
#octtree.traverse(camera)
#print octtree._OctTree__getCommonLevel(16, 23)
#print octtree.getIndexOf(math.Sphere(math.Vec2(1, 1), 0.8))
#print octtree.getIndexOf(math.Sphere(math.Vec2(2, 2), 0.8))
#print octtree.getIndexOf(math.Sphere(math.Vec2(3, 3), 0.8))
#print octtree.getIndexOf(math.Sphere(math.Vec2(0.5, 0.5), 0.1))
#print octtree.getIndexOf(math.Sphere(math.Vec2(3.5, 3.5), 0.1))
octtree.add(math.Sphere(math.Vec2(3.5, 3.5), 0.1))
| 29.547445 | 76 | 0.64748 |
ca83fcd591ed3d27e406f0170f4f9b1d97b74642 | 9,740 | py | Python | tests/python/pants_test/java/junit/test_junit_xml_parser.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2020-08-26T03:30:31.000Z | 2020-08-26T03:30:31.000Z | tests/python/pants_test/java/junit/test_junit_xml_parser.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2019-07-29T16:58:21.000Z | 2019-07-29T16:58:21.000Z | tests/python/pants_test/java/junit/test_junit_xml_parser.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import unittest
# NB: The Test -> JUnitTest import re-name above is needed to work around conflicts with pytest test
# collection and a conflicting Test type in scope during that process.
from pants.java.junit.junit_xml_parser import ParseError, RegistryOfTests
from pants.java.junit.junit_xml_parser import Test as JUnitTest
from pants.java.junit.junit_xml_parser import parse_failed_targets
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants.util.xml_parser import XmlParser
class TestTest(unittest.TestCase):
    """Tests for the JUnit ``Test`` value type (imported here as JUnitTest)."""

    def setUp(self):
        self.class_test = JUnitTest('class')
        self.method_test = JUnitTest('class', 'method')

    def test_no_method_normalization(self):
        """A missing or empty method name normalizes to a class-level test."""
        for variant in (JUnitTest('class', ''),
                        JUnitTest('class', None),
                        JUnitTest('class')):
            self.assertEqual(variant, self.class_test)
            self.assertIsNone(variant.methodname)

    def test_enclosing(self):
        """enclosing() is the test itself for class tests, the class otherwise."""
        self.assertIs(self.class_test, self.class_test.enclosing())
        self.assertEqual(self.class_test, self.method_test.enclosing())

    def test_render_test_spec(self):
        """Specs render as 'classname' or 'classname#methodname'."""
        self.assertEqual('class', self.class_test.render_test_spec())
        self.assertEqual('class#method', self.method_test.render_test_spec())
class TestTestRegistry(unittest.TestCase):
    """Tests for RegistryOfTests: ownership lookup, spec matching, indexing."""

    def test_empty(self):
        """An empty mapping/tuple/list all yield an empty registry."""
        self.assertTrue(RegistryOfTests({}).empty)
        self.assertTrue(RegistryOfTests(()).empty)
        self.assertTrue(RegistryOfTests([]).empty)

    def test_get_owning_target(self):
        """Class-level entries also own their methods; a method-only entry
        does not imply ownership of the bare class."""
        registry = RegistryOfTests(((JUnitTest('class1'), 'Bob'),
                                    (JUnitTest('class2'), 'Jane'),
                                    (JUnitTest('class3', 'method1'), 'Heidi')))
        self.assertEqual('Bob', registry.get_owning_target(JUnitTest('class1')))
        self.assertEqual('Bob', registry.get_owning_target(JUnitTest('class1', 'method1')))
        self.assertEqual('Jane', registry.get_owning_target(JUnitTest('class2')))
        self.assertEqual('Jane', registry.get_owning_target(JUnitTest('class2', 'method1')))
        self.assertIsNone(registry.get_owning_target(JUnitTest('class3')))
        self.assertEqual('Heidi', registry.get_owning_target(JUnitTest('class3', 'method1')))

    def _get_sample_test_registry(self):
        # Two classes share the simple name 'c1' in different packages so the
        # non-fully-qualified matching tests below get multiple hits.
        return RegistryOfTests(((JUnitTest('a.b.c1'), 'target_a'),
                                (JUnitTest('x.y.c1', methodname='method1'), 'target_b'),
                                (JUnitTest('a.b.c2'), 'target_c'),
                                (JUnitTest('a.b.c3', 'method3'), 'target_d')))

    def test_match_test_specs_fqcn(self):
        """A fully-qualified class spec matches exactly its entry."""
        registry = self._get_sample_test_registry()
        test_specs = [JUnitTest(classname='a.b.c1', methodname=None)]
        matched_specs, unknown_tests = registry.match_test_spec(test_specs)
        self.assertEqual({JUnitTest('a.b.c1'): 'target_a', }, matched_specs)
        self.assertEqual([], unknown_tests)

    def test_match_test_specs_fqcn_with_methodname(self):
        """A method spec on a registered class resolves to that class's target."""
        registry = self._get_sample_test_registry()
        test_specs = [JUnitTest(classname='a.b.c1', methodname='method')]
        matched_specs, unknown_tests = registry.match_test_spec(test_specs)
        self.assertEqual({JUnitTest('a.b.c1', methodname='method'): 'target_a'}, matched_specs)
        self.assertEqual([], unknown_tests)

    def test_match_test_specs_non_fqcn(self):
        """A bare class name matches every registered class with that name."""
        registry = self._get_sample_test_registry()
        spec, unknown_tests = registry.match_test_spec([JUnitTest(classname='c1', methodname=None)])
        self.assertEqual({
            JUnitTest('a.b.c1'): 'target_a',
            JUnitTest('x.y.c1'): 'target_b',
        }, spec)
        self.assertEqual([], unknown_tests)

    def test_match_test_specs_non_fqcn_no_match(self):
        """Unmatched specs are reported back as unknown tests."""
        registry = self._get_sample_test_registry()
        test_specs = [JUnitTest(classname='c4', methodname=None)]
        spec, unknown_tests = registry.match_test_spec(test_specs)
        self.assertEqual({}, spec)
        self.assertEqual(test_specs, unknown_tests)

    def _assert_index(self, expected, actual):
        def sorted_values(index):
            # Eliminate unimportant ordering differences in the index values.
            return {key: sorted(values) for key, values in index.items()}
        self.assertEqual(sorted_values(expected), sorted_values(actual))

    def test_index_nominal(self):
        """index() groups tests by the tuple of indexer results."""
        registry = RegistryOfTests({JUnitTest('class1'): (1, 'a'),
                                    JUnitTest('class2'): (2, 'b'),
                                    JUnitTest('class3', 'method1'): (1, 'a'),
                                    JUnitTest('class3', 'method2'): (4, 'b')})
        actual_index = registry.index(lambda t: t[0], lambda t: t[1])
        expected_index = {(1, 'a'): (JUnitTest('class1'), JUnitTest('class3', 'method1')),
                          (2, 'b'): (JUnitTest('class2'),),
                          (4, 'b'): (JUnitTest('class3', 'method2'),)}
        self._assert_index(expected_index, actual_index)

    def test_index_empty(self):
        """Indexing an empty registry produces an empty index."""
        self._assert_index({}, RegistryOfTests({}).index())

    def test_index_no_indexers(self):
        """With no indexers, every test lands under the empty-tuple key."""
        registry = RegistryOfTests({JUnitTest('class1'): (1, 'a'),
                                    JUnitTest('class2'): (2, 'b')})
        self._assert_index({(): (JUnitTest('class1'), JUnitTest('class2'))}, registry.index())
class TestParseFailedTargets(unittest.TestCase):
@staticmethod
def _raise_handler(e):
raise e
class CollectHandler:
def __init__(self):
self._errors = []
def __call__(self, e):
self._errors.append(e)
@property
def errors(self):
return self._errors
def test_parse_failed_targets_no_files(self):
registry = RegistryOfTests({})
with temporary_dir() as junit_xml_dir:
failed_targets = parse_failed_targets(registry, junit_xml_dir, self._raise_handler)
self.assertEqual({}, failed_targets)
def test_parse_failed_targets_nominal(self):
registry = RegistryOfTests({JUnitTest('org.pantsbuild.Failure'): 'Bob',
JUnitTest('org.pantsbuild.Error'): 'Jane',
JUnitTest('org.pantsbuild.AnotherError'): 'Bob',
JUnitTest('org.pantsbuild.subpackage.AnotherFailure'): 'Mary'})
with temporary_dir() as junit_xml_dir:
with open(os.path.join(junit_xml_dir, 'TEST-a.xml'), 'w') as fp:
fp.write("""
<testsuite failures="1" errors="1">
<testcase classname="org.pantsbuild.Green" name="testOK"/>
<testcase classname="org.pantsbuild.Failure" name="testFailure">
<failure/>
</testcase>
<testcase classname="org.pantsbuild.Error" name="testError">
<error/>
</testcase>
</testsuite>
""")
with open(os.path.join(junit_xml_dir, 'TEST-b.xml'), 'w') as fp:
fp.write("""
<testsuite failures="0" errors="1">
<testcase classname="org.pantsbuild.AnotherError" name="testAnotherError">
<error/>
</testcase>
</testsuite>
""")
with open(os.path.join(junit_xml_dir, 'random.xml'), 'w') as fp:
fp.write('<invalid></xml>')
with safe_open(os.path.join(junit_xml_dir, 'subdir', 'TEST-c.xml'), 'w') as fp:
fp.write("""
<testsuite failures="1" errors="0">
<testcase classname="org.pantsbuild.subpackage.AnotherFailure" name="testAnotherFailue">
<failure/>
</testcase>
</testsuite>
""")
failed_targets = parse_failed_targets(registry, junit_xml_dir, self._raise_handler)
self.assertEqual({'Bob': {JUnitTest('org.pantsbuild.Failure', 'testFailure'),
JUnitTest('org.pantsbuild.AnotherError', 'testAnotherError')},
'Jane': {JUnitTest('org.pantsbuild.Error', 'testError')},
'Mary': {JUnitTest('org.pantsbuild.subpackage.AnotherFailure',
'testAnotherFailue')}},
failed_targets)
def test_parse_failed_targets_error_raise(self):
    """A malformed junit xml file propagates a ParseError through the raising handler."""
    registry = RegistryOfTests({})
    with temporary_dir() as junit_xml_dir:
        bad_xml = os.path.join(junit_xml_dir, 'TEST-bad.xml')
        with open(bad_xml, 'w') as fp:
            fp.write('<invalid></xml>')

        with self.assertRaises(ParseError) as exc:
            parse_failed_targets(registry, junit_xml_dir, self._raise_handler)

        # The error carries the offending path and the underlying xml cause.
        self.assertEqual(bad_xml, exc.exception.xml_path)
        self.assertIsInstance(exc.exception.cause, XmlParser.XmlError)
def test_parse_failed_targets_error_continue(self):
    """A collecting handler records parse errors while parsing continues past them."""
    registry = RegistryOfTests({})
    with temporary_dir() as junit_xml_dir:
        bad_file1 = os.path.join(junit_xml_dir, 'TEST-bad1.xml')
        with open(bad_file1, 'w') as fp:
            fp.write('<testsuite failures="nan" errors="0"/>')
        with open(os.path.join(junit_xml_dir, 'TEST-good.xml'), 'w') as fp:
            fp.write("""
<testsuite failures="0" errors="1">
<testcase classname="org.pantsbuild.Error" name="testError">
<error/>
</testcase>
</testsuite>
""")
        bad_file2 = os.path.join(junit_xml_dir, 'TEST-bad2.xml')
        with open(bad_file2, 'w') as fp:
            fp.write('<invalid></xml>')

        handler = self.CollectHandler()
        failed_targets = parse_failed_targets(registry, junit_xml_dir, handler)

        # Both bad files were reported, yet the good report still parsed.
        self.assertEqual(2, len(handler.errors))
        self.assertEqual({bad_file1, bad_file2}, {e.xml_path for e in handler.errors})
        self.assertEqual({None: {JUnitTest('org.pantsbuild.Error', 'testError')}}, failed_targets)
| 41.802575 | 100 | 0.656571 |
4f90c200fd9f371f229c57626a42cf12b77902c5 | 206 | py | Python | zeromq/test/t_dirmon.py | LuckierDodge/liquidhandling | 56e3c325f5c3c3360dc96b4cc0013909256503b9 | [
"MIT"
] | 1 | 2021-06-29T20:24:38.000Z | 2021-06-29T20:24:38.000Z | zeromq/test/t_dirmon.py | AD-SDL/hudson-liquidhandling | a9d7ba9c85062e821ba8e650f4e4ee011c80be4e | [
"MIT"
] | null | null | null | zeromq/test/t_dirmon.py | AD-SDL/hudson-liquidhandling | a9d7ba9c85062e821ba8e650f4e4ee011c80be4e | [
"MIT"
] | 1 | 2021-03-25T13:47:42.000Z | 2021-03-25T13:47:42.000Z | from dirmon import checkDir
import time
# check for files that were modified in the last 10 minutes
t = 600
new_files = checkDir(".", last_mtime=t)
for f in new_files:
print("new file {}".format(f))
| 18.727273 | 59 | 0.713592 |
0a98fdcab44e5b3f634e282129a9e1f600f49fcc | 11,275 | py | Python | telethon/events/callbackquery.py | spankders/Telethon | 2fb560624d0de34655b6ffa879f4a5020a2bf8f2 | [
"MIT"
] | 1 | 2019-07-20T08:28:10.000Z | 2019-07-20T08:28:10.000Z | telethon/events/callbackquery.py | spankders/Telethon | 2fb560624d0de34655b6ffa879f4a5020a2bf8f2 | [
"MIT"
] | null | null | null | telethon/events/callbackquery.py | spankders/Telethon | 2fb560624d0de34655b6ffa879f4a5020a2bf8f2 | [
"MIT"
] | null | null | null | import re
import struct
from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils
from ..tl import types, functions
from ..tl.custom.sendergetter import SenderGetter
@name_inner_event
class CallbackQuery(EventBuilder):
    """
    Occurs whenever an inline button is clicked (a callback query).

    Note that the `chats` parameter will **not** work with normal
    IDs or peers if the clicked inline button comes from a "via bot"
    message; for inline callbacks it also matches against the
    ``chat_instance``.

    Args:
        data (`bytes` | `str` | `callable`, optional):
            Payload the clicked button's data must match. May be raw
            ``bytes``, a UTF-8 string, a compiled regex or a callable.
    """
    def __init__(
            self, chats=None, *, blacklist_chats=False, func=None, data=None):
        super().__init__(chats, blacklist_chats=blacklist_chats, func=func)

        if isinstance(data, bytes):
            self.data = data
        elif isinstance(data, str):
            # Telegram delivers callback data as bytes; normalize here.
            self.data = data.encode('utf-8')
        elif not data or callable(data):
            # No filter at all, or a user-supplied predicate: keep as-is.
            self.data = data
        elif hasattr(data, 'match') and callable(data.match):
            # Compiled regex: str patterns get recompiled over bytes.
            if not isinstance(getattr(data, 'pattern', b''), bytes):
                data = re.compile(data.pattern.encode('utf-8'),
                                  data.flags & (~re.UNICODE))

            self.data = data.match
        else:
            raise TypeError('Invalid data type given')

    @classmethod
    def build(cls, update):
        if isinstance(update, types.UpdateBotCallbackQuery):
            built = cls.Event(update, update.peer, update.msg_id)
        elif isinstance(update, types.UpdateInlineBotCallbackQuery):
            # See https://github.com/LonamiWebs/Telethon/pull/1005
            # The 64-bit message ID actually packs (msg_id, peer_id).
            msg_id, peer_id = struct.unpack(
                '<ii', struct.pack('<q', update.msg_id.id))
            peer = (types.PeerChannel(-peer_id) if peer_id < 0
                    else types.PeerUser(peer_id))
            built = cls.Event(update, peer, msg_id)
        else:
            return

        built._entities = update._entities
        return built

    def filter(self, event):
        if self.chats is not None:
            # Inline callbacks can only be matched via chat_instance.
            in_chats = event.query.chat_instance in self.chats
            if event.chat_id:
                in_chats |= event.chat_id in self.chats

            if in_chats == self.blacklist_chats:
                return None

        if self.data:
            if callable(self.data):
                event.data_match = self.data(event.query.data)
                if not event.data_match:
                    return None
            elif event.query.data != self.data:
                return None

        return event
class Event(EventCommon, SenderGetter):
    """
    Represents the event of a new callback query.

    Members:
        query (:tl:`UpdateBotCallbackQuery`):
            The original update object.

        data_match (`obj`, optional):
            Whatever the builder's ``data=`` parameter produced for this
            query (similar to ``pattern_match`` on new-message events).
    """
    def __init__(self, query, peer, msg_id):
        super().__init__(peer, msg_id=msg_id)
        self.query = query
        self.data_match = None
        # Sender and message objects are resolved lazily on first access.
        self._sender_id = query.user_id
        self._input_sender = None
        self._sender = None
        self._message = None
        self._answered = False
def _load_entities(self):
    # Besides the base entities, the clicking user must also resolve.
    self._sender, self._input_sender = self._get_entity_pair(self.sender_id)
    return super()._load_entities() and self._input_sender is not None

@property
def id(self):
    """
    The query ID, randomly generated for the user who
    clicked the inline button.
    """
    return self.query.query_id

@property
def message_id(self):
    """
    ID of the message the clicked inline button belongs to.
    """
    return self._message_id

@property
def data(self):
    """
    Data payload carried by the original inline button.
    """
    return self.query.data

@property
def chat_instance(self):
    """
    Unique identifier for the chat where the callback occurred;
    useful for high scores in games.
    """
    return self.query.chat_instance
async def get_message(self):
    """
    Fetch (and cache) the message the clicked inline button belongs to.
    Returns ``None`` if it cannot be retrieved.
    """
    if self._message is not None:
        return self._message

    try:
        chat = await self.get_input_chat() if self.is_channel else None
        self._message = await self._client.get_messages(
            chat, ids=self._message_id)
    except ValueError:
        return

    return self._message

async def _refetch_sender(self):
    self._sender = self._entities.get(self.sender_id)
    if not self._sender:
        return

    # NOTE(review): derives the input *sender* from self._chat — looks
    # suspicious; confirm against upstream telethon before relying on it.
    self._input_sender = utils.get_input_peer(self._chat)
    if not getattr(self._input_sender, 'access_hash', True):
        # getattr with True to handle the InputPeerSelf() case
        try:
            self._input_sender = self._client._entity_cache[self._sender_id]
        except KeyError:
            msg = await self.get_message()
            if msg:
                self._sender = msg._sender
                self._input_sender = msg._input_sender
async def answer(
        self, message=None, cache_time=0, *, url=None, alert=False):
    """
    Answers the callback query (and stops the loading circle).
    Subsequent calls on an already-answered event are no-ops.

    Args:
        message (`str`, optional): toast text shown to the user.
        cache_time (`int`, optional): client-side cache time in seconds
            (0 disables caching).
        url (`str`, optional): URL opened in the user's client; only
            your bot's game URLs or 't.me/your_bot?start=xyz' are valid.
        alert (`bool`, optional): show a pop-up dialog instead of a
            toast. Defaults to ``False``.
    """
    if self._answered:
        return

    self._answered = True
    return await self._client(
        functions.messages.SetBotCallbackAnswerRequest(
            query_id=self.query.query_id,
            cache_time=cache_time,
            alert=alert,
            message=message,
            url=url
        )
    )

@property
def via_inline(self):
    """
    Whether the clicked button reached the chat via an inline query
    (``True``) or was sent by the bot itself (``False``). When ``True``
    the bot is likely **not** in the chat, so `respond`/`delete` may
    fail (`edit` always works).
    """
    return isinstance(self.query, types.UpdateInlineBotCallbackQuery)
async def respond(self, *args, **kwargs):
    """
    Sends a message to the chat (not as a reply); shorthand for
    `send_message` with ``entity`` already set. Also schedules a task
    to `answer` the callback. Likely fails if `via_inline` is ``True``.
    """
    self._client.loop.create_task(self.answer())
    return await self._client.send_message(
        await self.get_input_chat(), *args, **kwargs)

async def reply(self, *args, **kwargs):
    """
    Sends a message as a reply to the clicked message; shorthand for
    `send_message` with ``entity`` and ``reply_to`` set. Also schedules
    a task to `answer` the callback. Likely fails if `via_inline`.
    """
    self._client.loop.create_task(self.answer())
    kwargs['reply_to'] = self.query.msg_id
    return await self._client.send_message(
        await self.get_input_chat(), *args, **kwargs)

async def edit(self, *args, **kwargs):
    """
    Edits the clicked message and returns ``True`` on success; also
    schedules a task to `answer` the callback.

    .. note::
        Unlike `Message.edit <telethon.tl.custom.message.Message.edit>`,
        the previous message is not respected, since the message object
        is normally not present here.
    """
    self._client.loop.create_task(self.answer())
    if isinstance(self.query.msg_id, types.InputBotInlineMessageID):
        # Inline messages are addressed by their special ID alone.
        return await self._client.edit_message(
            self.query.msg_id, *args, **kwargs
        )
    else:
        return await self._client.edit_message(
            await self.get_input_chat(), self.query.msg_id,
            *args, **kwargs
        )

async def delete(self, *args, **kwargs):
    """
    Deletes the clicked message; shorthand for `delete_messages` with
    ``entity`` and ``message_ids`` set. For bulk deletion use the
    client directly instead. Also schedules a task to `answer` the
    callback. Likely fails if `via_inline` is ``True``.
    """
    self._client.loop.create_task(self.answer())
    return await self._client.delete_messages(
        await self.get_input_chat(), [self.query.msg_id],
        *args, **kwargs
    )
| 38.091216 | 84 | 0.560355 |
fe35a66d9aed9485c42fb229b03b04677bb1035f | 2,318 | py | Python | NLP/lemmatizer.py | mtorabirad/Sentiment-Analysis-Tweets | e5afc5c1ae47eac9110996279be05ba3ee6346a5 | [
"MIT"
] | 1 | 2021-01-17T17:17:55.000Z | 2021-01-17T17:17:55.000Z | NLP/lemmatizer.py | mtorabirad/Sentiment-Analysis-Tweets | e5afc5c1ae47eac9110996279be05ba3ee6346a5 | [
"MIT"
] | null | null | null | NLP/lemmatizer.py | mtorabirad/Sentiment-Analysis-Tweets | e5afc5c1ae47eac9110996279be05ba3ee6346a5 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import spacy
import re
from joblib import Parallel, delayed
from spacy.lang.en.stop_words import STOP_WORDS
import en_core_web_sm
nlp = en_core_web_sm.load(disable=['tagger', 'parser', 'ner'])
stopwordfile = r'AfterDec28\stopwords.txt'
def _get_stopwords(from_file=False):
    """Return the stop-word set, from ``stopwordfile`` or spaCy's defaults."""
    if from_file:
        with open(stopwordfile) as handle:
            words = [line.strip("\n") for line in handle]
    else:
        words = STOP_WORDS
    # A set gives O(1) membership tests during lemmatization.
    return set(words)
def _lemmatize_pipe(doc):
    """Lowercased lemmas of *doc*'s alphabetic, non-stop-word tokens."""
    stops = _get_stopwords(from_file=False)
    return [str(token.lemma_).lower()
            for token in doc
            if token.is_alpha and token.text.lower() not in stops]
def chunker(iterable, total_length, chunk):
    """Lazily yield successive slices of *iterable*, each *chunk* items long."""
    return (iterable[start: start + chunk]
            for start in range(0, total_length, chunk))
def flatten(list_of_lists):
    """Flatten a list of lists to a combined list."""
    combined = []
    for sublist in list_of_lists:
        combined.extend(sublist)
    return combined
def _process_chunk(texts):
    """Lemmatize a batch of texts (a pandas Series or iterable of strings).

    nlp.pipe processes the texts as a stream and buffers them in batches
    (batch_size=20), which is much more efficient than calling the
    pipeline on each text one-by-one.
    """
    return [_lemmatize_pipe(doc) for doc in nlp.pipe(texts, batch_size=20)]
def parallel_lemmatize_in_chunks(df, chunk, **parameters):
    """Lemmatize df['clean'] in parallel, one joblib task per *chunk* rows.

    ``parameters`` are forwarded to joblib.Parallel (e.g. n_jobs).
    Returns a single flat list of per-document lemma lists.
    """
    runner = Parallel(**parameters)
    task = delayed(_process_chunk)
    per_chunk = runner(task(piece)
                       for piece in chunker(df['clean'], len(df), chunk=chunk))
    return flatten(per_chunk)
48bf7c44fc415a5b85c03b432cb48353c46edf7c | 2,011 | py | Python | Book_Ladder/web/page/views.py | Rdjroot/BookLadder | d4e1f90572f2dda2e7c25890b99c965ded0f02c8 | [
"MIT"
] | null | null | null | Book_Ladder/web/page/views.py | Rdjroot/BookLadder | d4e1f90572f2dda2e7c25890b99c965ded0f02c8 | [
"MIT"
] | null | null | null | Book_Ladder/web/page/views.py | Rdjroot/BookLadder | d4e1f90572f2dda2e7c25890b99c965ded0f02c8 | [
"MIT"
] | null | null | null | # -*- coding = utf-8 -*-
# @Time:2021/3/713:31
# @Author:Linyu
# @Software:PyCharm
from . import page
from flask import Flask,render_template
from web.pageutils import scoreRelise
from web.pageutils import BooksScore
from web.pageutils import BooksCount
from web.pageutils import pointsDraw
from web.pageutils import messBarInfo
from web.pageutils import tagRader
@page.route("/Analyse")
def analyse():
#左一图
lineData = scoreRelise()
#左二图
scoreDic = BooksScore()
countDic = BooksCount()
#中间图
RaderInfo = tagRader()
lit = RaderInfo[0]
sci = RaderInfo[1]
# 右一图
dataGroup = pointsDraw()
#右二
allInfo = messBarInfo()
typeInfo = allInfo[0]
commInfo = allInfo[1]
suppInfo = allInfo[2]
sameInfo = allInfo[3]
return render_template("page/main.html",data =lineData,scoreDic = scoreDic,
countDic = countDic,lit = lit,sci = sci,dataGroup = dataGroup,
typeInfo = typeInfo,commInfo= commInfo, suppInfo = suppInfo,sameInfo = sameInfo)
@page.route("/score")
def score():
lineData = scoreRelise()
return render_template("page/scorePage.html",data =lineData)
@page.route("/typePre")
def typePre():
scoreDic = BooksScore()
countDic = BooksCount()
return render_template("page/typeAnalyse.html",scoreDic = scoreDic,countDic = countDic)
@page.route("/points")
def pointsPic():
dataGroup = pointsDraw()
return render_template("page/pointsPic.html",dataGroup = dataGroup)
@page.route("/messbar")
def messBar():
allInfo = messBarInfo()
typeInfo = allInfo[0]
commInfo = allInfo[1]
suppInfo = allInfo[2]
sameInfo = allInfo[3]
return render_template("page/messBars.html",typeInfo = typeInfo,commInfo= commInfo,
suppInfo = suppInfo,sameInfo = sameInfo)
@page.route("/RadarPic")
def RadarPic():
RaderInfo = tagRader()
lit = RaderInfo[0]
sci = RaderInfo[1]
return render_template("page/radarTag.html",lit = lit,sci = sci) | 28.728571 | 107 | 0.674789 |
0b4147175743ed3aaf1892693cb05adfeed3136a | 24,453 | py | Python | packages/python/yap_kernel/yap_ipython/lib/demo.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 90 | 2015-03-09T01:24:15.000Z | 2022-02-24T13:56:25.000Z | packages/python/yap_kernel/yap_ipython/lib/demo.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 52 | 2016-02-14T08:59:37.000Z | 2022-03-14T16:39:35.000Z | packages/python/yap_kernel/yap_ipython/lib/demo.py | ryandesign/yap | 9a50d1a3d985ec559ebfbb8e9f4d4c6b88b30214 | [
"Artistic-1.0-Perl",
"ClArtistic"
] | 27 | 2015-11-19T02:45:49.000Z | 2021-11-25T19:47:58.000Z | """Module for interactive demos using yap_ipython.
This module implements a few classes for running Python scripts interactively
in IPython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to yap_ipython.
Provided classes
----------------
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by IPython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: IPython version of the LineDemo class (the demo is
executed a line at a time, but processed via IPython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Inheritance diagram:
.. inheritance-diagram:: yap_ipython.lib.demo
:parts: 3
Subclassing
-----------
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- highlight(): format every block and optionally highlight comments and
docstring content.
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
---------
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global IPython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where IPython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
Make a block execute automatically, but still being printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different that calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via::
from yap_ipython.lib.demo import Demo
d = Demo('ex_demo.py')
d()
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Note: To make this simpler to explore, a file called "demo-exercizer.py" has
been added to the "docs/examples/core" directory. Just cd to this directory in
an IPython session, and type::
%run demo-exercizer.py
and then follow the directions.
Example
-------
The following is a very simple example of a valid demo file.
::
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of IPython's Demo class.'''
print 'Hello, welcome to an interactive IPython demo.'
# The mark below defines a block boundary, which is a point where IPython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below makes this block as silent
# <demo> silent
print 'This is a silent block, which gets executed but not printed.'
# <demo> stop
# <demo> auto
print 'This is an automatic block.'
print 'It is executed without asking for confirmation, but printed.'
z = x+y
print 'z=',x
# <demo> stop
# This is just another normal block.
print 'z is now:', z
print 'bye!'
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
import os
import re
import shlex
import sys
import pygments
from yap_ipython.utils.text import marquee
from yap_ipython.utils import openpy
from yap_ipython.utils import py3compat
__all__ = ['Demo','yap_ipythonDemo','LineDemo','yap_ipythonLineDemo','DemoError']
class DemoError(Exception):
    """Error raised while building or running an interactive demo."""


def re_mark(mark):
    """Compile a multiline regex matching a '# <demo> <mark>' tag line."""
    return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark, re.MULTILINE)
class Demo(object):
    """Interactive demo runner for Python scripts marked up with
    '# <demo>' tags (see the module docstring for the tag reference)."""

    # Tag regexes for block boundaries and execution modes.  The stop
    # pattern must be a raw string: '\s' in a plain literal is an invalid
    # escape sequence (DeprecationWarning, future SyntaxError).
    re_stop = re_mark(r'-*\s?stop\s?-*')
    re_silent = re_mark('silent')
    re_auto = re_mark('auto')
    re_auto_all = re_mark('auto_all')

    def __init__(self, src, title='', arg_str='', auto_all=None, format_rst=False,
                 formatter='terminal', style='default'):
        """Make a new demo object.  To run the demo, simply call the object.

        Inputs:

          - src: a file, a file-like object, or a string that can be
            resolved to a filename.

        Optional inputs:

          - title: name for the demo; defaults to the filename (or a
            generic label for file-like sources).

          - arg_str(''): argument string, split like sys.argv so the demo
            script sees a similar environment.

          - auto_all(None): run all blocks automatically without
            confirmation, overriding the per-block tags.

          - format_rst(False): also highlight comments and docstrings
            with the pygments rst lexer.

          - formatter('terminal'): pygments formatter name.  Useful
            values for terminals: terminal, terminal256, terminal16m.

          - style('default'): pygments style name.
        """
        if hasattr(src, "read"):
            # It seems to be a file or a file-like object.
            self.fname = "from a file-like object"
            if title == '':
                self.title = "from a file-like object"
            else:
                self.title = title
        else:
            # Assume it's a string or something that can be converted to one.
            self.fname = src
            if title == '':
                (filepath, filename) = os.path.split(src)
                self.title = filename
            else:
                self.title = title
        self.sys_argv = [src] + shlex.split(arg_str)
        self.auto_all = auto_all
        self.src = src

        self.inside_ipython = "get_ipython" in globals()
        if self.inside_ipython:
            # Borrow the running shell's namespace, colorizer and traceback
            # printer so color scheme etc. stay in sync with IPython's mode.
            ip = get_ipython()  # in builtins whenever yap_ipython is running
            self.ip_ns = ip.user_ns
            self.ip_colorize = ip.pycolorize
            self.ip_showtb = ip.showtraceback
            self.ip_run_cell = ip.run_cell
            self.shell = ip

        self.formatter = pygments.formatters.get_formatter_by_name(formatter,
                                                                   style=style)
        self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
        self.format_rst = format_rst
        if format_rst:
            self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")

        # Load user data and initialize data structures.
        self.reload()
def fload(self):
    """(Re)open the underlying file object for the demo source."""
    # Close any handle left over from a previous load.
    if hasattr(self, 'fobj') and self.fobj is not None:
        self.fobj.close()

    if hasattr(self.src, "read"):
        # Already a file or file-like object.
        self.fobj = self.src
    else:
        # Assume it's a string or something convertible to a filename.
        self.fobj = openpy.open(self.fname)

def reload(self):
    """Reload source from disk and initialize state."""
    self.fload()
    self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
    raw_blocks = [blk.strip() for blk in self.re_stop.split(self.src) if blk]
    self._silent = [bool(self.re_silent.findall(blk)) for blk in raw_blocks]
    self._auto = [bool(self.re_auto.findall(blk)) for blk in raw_blocks]

    # If auto_all was not given (default None), read it from the file.
    if self.auto_all is None:
        self.auto_all = bool(self.re_auto_all.findall(raw_blocks[0]))
    else:
        self.auto_all = bool(self.auto_all)

    # Strip the markup tags so they never show up while running the demo.
    src_blocks = [self.re_auto.sub('', blk) if is_auto else blk
                  for is_auto, blk in zip(self._auto, raw_blocks)]
    # Remove the auto_all marker (only valid in the first block).
    src_blocks[0] = self.re_auto_all.sub('', src_blocks[0])

    self.nblocks = len(src_blocks)
    self.src_blocks = src_blocks

    # Also build the syntax-highlighted rendition of every block.
    self.src_blocks_colored = list(map(self.highlight, self.src_blocks))

    # Ensure a clean namespace and seek offset.
    self.reset()
def reset(self):
    """Reset the namespace and seek pointer to restart the demo."""
    self.user_ns = {}
    self.finished = False
    self.block_index = 0

def _validate_index(self, index):
    # Valid block indices are 0 <= index < nblocks.
    if index < 0 or index >= self.nblocks:
        raise ValueError('invalid block index %s' % index)

def _get_index(self, index):
    """Resolve *index* (None meaning the current block), validating it.

    Returns None if the demo is finished."""
    if index is None:
        if self.finished:
            print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
            return None
        index = self.block_index
    else:
        self._validate_index(index)
    return index

def seek(self, index):
    """Move the current seek pointer to the given block.

    Negative indices seek from the end, as with Python lists."""
    if index < 0:
        index = self.nblocks + index
    self._validate_index(index)
    self.block_index = index
    self.finished = False

def back(self, num=1):
    """Move the seek pointer back num blocks (default is 1)."""
    self.seek(self.block_index - num)

def jump(self, num=1):
    """Jump a number of blocks relative to the current one (default 1)."""
    self.seek(self.block_index + num)

def again(self):
    """Move the seek pointer back one block and re-execute."""
    self.back(1)
    self()
def edit(self, index=None):
    """Edit a block (default: the last one executed) and re-run it.

    Only the in-memory copy of the demo is modified, NOT the original
    source file; edit the file and call reload() for that. This lets
    you tweak a block mid-demonstration without damaging the script."""
    index = self._get_index(index)
    if index is None:
        return
    # The pointer has already advanced past the block we just ran, so
    # step back one (unless we're at the very beginning).
    if index > 0:
        index -= 1

    tmpname = self.shell.mktempfile(self.src_blocks[index])
    self.shell.hooks.editor(tmpname, 1)
    with open(tmpname, 'r') as handle:
        new_block = handle.read()

    # Update both the plain and the colored copy of the block.
    self.src_blocks[index] = new_block
    self.src_blocks_colored[index] = self.highlight(new_block)
    self.block_index = index
    # Re-run starting at the newly edited block.
    self()

def show(self, index=None):
    """Print a single block (with its marquee header) to stdout."""
    index = self._get_index(index)
    if index is None:
        return

    print(self.marquee('<%s> block # %s (%s remaining)' %
                       (self.title, index, self.nblocks - index - 1)))
    print(self.src_blocks_colored[index])
    sys.stdout.flush()

def show_all(self):
    """Print the entire demo to stdout, block by block."""
    title = self.title
    nblocks = self.nblocks
    silent = self._silent
    marquee = self.marquee
    for index, block in enumerate(self.src_blocks_colored):
        if silent[index]:
            print(marquee('<%s> SILENT block # %s (%s remaining)' %
                          (title, index, nblocks - index - 1)))
        else:
            print(marquee('<%s> block # %s (%s remaining)' %
                          (title, index, nblocks - index - 1)))
        print(block, end=' ')
        sys.stdout.flush()
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
# Runs in the demo's own namespace (self.user_ns), not the caller's.
# NOTE(review): indentation was stripped from this extract; the code
# tokens below are kept byte-identical to the original.
exec(source, self.user_ns)
def __call__(self,index=None):
"""run a block of the demo.
If index is given, it should be an integer >=1 and <= nblocks. This
means that the calling convention is one off from typical Python
lists. The reason for the inconsistency is that the demo always
prints 'Block n/N, and N is the total, so it would be very odd to use
zero-indexing here."""
# Resolve the block to run; None means the demo is already finished.
index = self._get_index(index)
if index is None:
return
try:
marquee = self.marquee
next_block = self.src_blocks[index]
self.block_index += 1
# Silent blocks execute without displaying their source.
if self._silent[index]:
print(marquee('Executing silent block # %s (%s remaining)' %
(index,self.nblocks-index-1)))
else:
self.pre_cmd()
self.show(index)
if self.auto_all or self._auto[index]:
print(marquee('output:'))
else:
# Interactive confirmation: any non-empty answer skips the block.
print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
ans = py3compat.input().strip()
if ans:
print(marquee('Block NOT executed'))
return
try:
# Run under the demo's own sys.argv, restoring the real one after.
save_argv = sys.argv
sys.argv = self.sys_argv
self.run_cell(next_block)
self.post_cmd()
finally:
sys.argv = save_argv
except:
if self.inside_ipython:
self.ip_showtb(filename=self.fname)
else:
# On success, export the demo namespace into the IPython session.
if self.inside_ipython:
self.ip_ns.update(self.user_ns)
if self.block_index == self.nblocks:
mq1 = self.marquee('END OF DEMO')
if mq1:
# avoid spurious print if empty marquees are used
print()
print(mq1)
print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
self.finished = True
# These methods are meant to be overridden by subclasses who may wish to
# customize the behavior of their demos.
def marquee(self, txt='', width=78, mark='*'):
    """Return the input string centered in a 'marquee'."""
    return marquee(txt, width, mark)

def pre_cmd(self):
    """Hook called right before each block executes; default no-op."""
    pass

def post_cmd(self):
    """Hook called right after each block executes; default no-op."""
    pass

def highlight(self, block):
    """Syntax-highlight one source block with pygments.

    When format_rst is set, docstring bodies and single-line comments
    are re-lexed with the rst lexer so embedded reStructuredText gets
    colored as well.
    """
    tokens = pygments.lex(block, self.python_lexer)
    if self.format_rst:
        from pygments.token import Token
        merged = []
        for tok_type, tok_text in tokens:
            if tok_type == Token.String.Doc and len(tok_text) > 6:
                # The triple quotes stay python-highlighted; the body
                # between them goes through the rst lexer.
                merged += pygments.lex(tok_text[:3], self.python_lexer)
                merged += pygments.lex(tok_text[3:-3], self.rst_lexer)
                merged += pygments.lex(tok_text[-3:], self.python_lexer)
            elif tok_type == Token.Comment.Single:
                merged.append((Token.Comment.Single, tok_text[0]))
                # Drop the extra trailing newline the rst lexer adds.
                merged += list(pygments.lex(tok_text[1:], self.rst_lexer))[:-1]
            else:
                merged.append((tok_type, tok_text))
        tokens = merged
    return pygments.format(tokens, self.formatter)
class IPythonDemo(Demo):
    """Demo variant that routes each block through IPython's input machinery.

    Instead of executing blocks with a plain ``exec`` (as ``Demo`` does),
    blocks are handed to ``self.shell.run_cell``, so any input filters or
    transformations installed in the interactive environment are applied.
    Demo scripts therefore behave exactly as if their contents had been
    typed interactively; the plain ``Demo`` class, by contrast, requires
    the input to be valid pure Python code.
    """

    def run_cell(self, source):
        """Execute a string with one or more lines of code via IPython."""
        self.shell.run_cell(source)
class LineDemo(Demo):
    """Demo where each non-blank line is executed as a separate block.

    The input script must be valid Python code and needs no markup at all.
    It is meant for simple scripts (no nesting or indentation of any kind)
    consisting of multiple single-line inputs, executed one at a time as if
    typed at the interactive prompt.

    Note: the input can not have *any* indentation, which means that only
    single lines of input are accepted — not even function definitions are
    valid.
    """

    def reload(self):
        """Re-read the source file and rebuild per-line block state."""
        # read data and split into one block per non-blank line
        self.fload()
        raw_lines = self.fobj.readlines()
        blocks = [line for line in raw_lines if line.strip()]
        count = len(blocks)
        self.src = ''.join(raw_lines)
        self._silent = [False] * count
        self._auto = [True] * count
        self.auto_all = True
        self.nblocks = count
        self.src_blocks = blocks
        # syntax-highlighted copy of every block
        self.src_blocks_colored = [self.highlight(block) for block in blocks]
        # ensure clean namespace and seek offset
        self.reset()
class IPythonLineDemo(IPythonDemo, LineDemo):
    """Variant of the LineDemo class whose input is processed by yap_ipython."""

    pass
class ClearMixin(object):
    """Mixin producing Demo classes with less visual clutter.

    Demos using this mixin clear the screen before every block and render
    blank marquees.  For the methods defined here to actually override those
    of the classes it is mixed with, it must come /first/ in the inheritance
    tree.  For example::

        class ClearIPDemo(ClearMixin,IPythonDemo): pass

    provides an IPythonDemo class with the mixin's features.
    """

    def marquee(self, txt='', width=78, mark='*'):
        """Return an empty marquee no matter what the input is."""
        return ''

    def pre_cmd(self):
        """Clear the terminal before each block is executed."""
        from yap_ipython.utils.terminal import _term_clear
        _term_clear()
class ClearDemo(ClearMixin, Demo):
    """``Demo`` with screen clearing and blank marquees (via ``ClearMixin``)."""

    pass
class ClearIPDemo(ClearMixin, IPythonDemo):
    """``IPythonDemo`` with screen clearing and blank marquees."""

    pass
def slide(file_path, noclear=False, format_rst=True, formatter="terminal",
          style="native", auto_all=False, delimiter='...'):
    """Play every block of *file_path* as terminal "slides".

    After each block the user is prompted with *delimiter*; Ctrl-C exits
    with status 1.  With ``noclear`` the screen is kept between slides.
    """
    demo_cls = Demo if noclear else ClearDemo
    presentation = demo_cls(file_path, format_rst=format_rst,
                            formatter=formatter, style=style,
                            auto_all=auto_all)
    while not presentation.finished:
        presentation()
        try:
            py3compat.input('\n' + delimiter)
        except KeyboardInterrupt:
            exit(1)
if __name__ == '__main__':
    # Command-line front-end for slide(); one positional demo file plus
    # options mirroring slide()'s keyword arguments.
    import argparse
    parser = argparse.ArgumentParser(description='Run python demos')
    parser.add_argument('--noclear', '-C', action='store_true',
                        help='Do not clear terminal on each slide')
    # Fixed typo: "dostrings" -> "docstrings".
    parser.add_argument('--rst', '-r', action='store_true',
                        help='Highlight comments and docstrings as rst')
    parser.add_argument('--formatter', '-f', default='terminal',
                        help='pygments formatter name could be: terminal, '
                             'terminal256, terminal16m')
    parser.add_argument('--style', '-s', default='default',
                        help='pygments style name')
    # Fixed missing space between the two adjacent string literals
    # ("withoutconfirmation" -> "without confirmation").
    parser.add_argument('--auto', '-a', action='store_true',
                        help='Run all blocks automatically without '
                             'confirmation')
    parser.add_argument('--delimiter', '-d', default='...',
                        help='slides delimiter added after each slide run')
    parser.add_argument('file', nargs=1,
                        help='python demo file')
    args = parser.parse_args()
    slide(args.file[0], noclear=args.noclear, format_rst=args.rst,
          formatter=args.formatter, style=args.style, auto_all=args.auto,
          delimiter=args.delimiter)
| 36.606287 | 89 | 0.620333 |
e22af69ace2190e849f918f07293148dd773f1e8 | 2,088 | py | Python | piwise/transform.py | heixialeeLeon/segment_piwise | 4eaeeb4aa36298c1140c9fb8a6adef7a3bcfb03f | [
"BSD-3-Clause"
] | null | null | null | piwise/transform.py | heixialeeLeon/segment_piwise | 4eaeeb4aa36298c1140c9fb8a6adef7a3bcfb03f | [
"BSD-3-Clause"
] | null | null | null | piwise/transform.py | heixialeeLeon/segment_piwise | 4eaeeb4aa36298c1140c9fb8a6adef7a3bcfb03f | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import torch
from PIL import Image
def colormap(n):
    """Build an ``(n, 3)`` uint8 label colormap (PASCAL-VOC style).

    For label index ``i``, bit ``3*j`` of ``i`` feeds bit ``7-j`` of the red
    channel, bit ``3*j+1`` the green channel and bit ``3*j+2`` the blue
    channel, giving well-separated colors for consecutive labels.
    """
    cmap = np.zeros([n, 3]).astype(np.uint8)
    for idx in np.arange(n):
        channels = np.zeros(3)
        for bit in np.arange(8):
            weight = 1 << (7 - bit)
            for chan in range(3):
                src_bit = 3 * bit + chan
                channels[chan] += weight * ((idx & (1 << src_bit)) >> src_bit)
        cmap[idx, :] = channels
    return cmap
class Relabel:
    """Callable that replaces every occurrence of one label with another
    in a ``torch.LongTensor`` (in place)."""

    def __init__(self, olabel, nlabel):
        # old -> new label mapping applied on every call
        self.olabel = olabel
        self.nlabel = nlabel

    def __call__(self, tensor):
        assert isinstance(tensor, torch.LongTensor), 'tensor needs to be LongTensor'
        mask = tensor == self.olabel
        tensor[mask] = self.nlabel
        return tensor
class ToLabel:
    """Convert a PIL image (or any array-like) into a ``1 x H x W``
    LongTensor of integer labels."""

    def __call__(self, image):
        array = np.array(image)
        return torch.from_numpy(array).long().unsqueeze(0)
class Colorize:
    """Map a ``1 x H x W`` label tensor to a ``3 x H x W`` color image
    using the first *n* entries of :func:`colormap`."""

    def __init__(self, n=22):
        # Build the full 256-entry map, alias entry n to the last color
        # (used for "void"/ignore labels), then keep only the first n rows.
        self.cmap = colormap(256)
        self.cmap[n] = self.cmap[-1]
        self.cmap = torch.from_numpy(self.cmap[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        height, width = size[1], size[2]
        color_image = torch.ByteTensor(3, height, width).fill_(0)
        # Label 0 (background) keeps the zero fill.
        for label in range(1, len(self.cmap)):
            mask = gray_image[0] == label
            for channel in range(3):
                color_image[channel][mask] = self.cmap[label][channel]
        return color_image
class Colorize2:
    """Map a 2-D ``H x W`` label tensor to a ``3 x H x W`` color image.

    Variant of :class:`Colorize` that expects the label map WITHOUT a
    leading channel axis.

    Fixes: removed the unused local ``len_cmap`` and the corrupted trailing
    tokens on the return line (which made the class a syntax error).
    """

    def __init__(self, n=22):
        # Full 256-entry map, entry n aliased to the last color, truncated to n.
        self.cmap = colormap(256)
        self.cmap[n] = self.cmap[-1]
        self.cmap = torch.from_numpy(self.cmap[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[0], size[1]).fill_(0)
        # Label 0 (background) keeps the zero fill.
        for label in range(1, len(self.cmap)):
            mask = gray_image == label
            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]
        return color_image
a0dfb37409cb18601c6a0c5c43a29a2c29c66d5d | 92 | py | Python | orquestador/nyc_ccci_etl/utils/get_current_ip.py | gemathus/dpa-2020 | b5d7a350b058e77a9b358fa2758632fa2265a9cb | [
"IJG"
] | 1 | 2020-04-01T01:12:16.000Z | 2020-04-01T01:12:16.000Z | orquestador/nyc_ccci_etl/utils/get_current_ip.py | gemathus/dpa-2020 | b5d7a350b058e77a9b358fa2758632fa2265a9cb | [
"IJG"
] | 3 | 2021-06-02T02:01:07.000Z | 2022-03-12T00:33:16.000Z | orquestador/nyc_ccci_etl/utils/get_current_ip.py | dpa-2020-equipo-5/dpa-2020 | b5d7a350b058e77a9b358fa2758632fa2265a9cb | [
"IJG"
] | 3 | 2020-05-11T01:15:14.000Z | 2021-03-12T02:34:23.000Z | from requests import get
def get_current_ip():
    """Return this machine's public IP address, queried from api.ipify.org.

    Fix: removed the corrupted trailing tokens fused onto the return line
    (``| 23 | 44 | ...``), which made the function a syntax error.
    """
    return get('https://api.ipify.org').text
c17f7eb7656d0186564f80afbc2a30b8486e0d7b | 505 | py | Python | python/XML2_FindTheMaximumDepth.py | HannoFlohr/hackerrank | 9644c78ce05a6b1bc5d8f542966781d53e5366e3 | [
"MIT"
] | null | null | null | python/XML2_FindTheMaximumDepth.py | HannoFlohr/hackerrank | 9644c78ce05a6b1bc5d8f542966781d53e5366e3 | [
"MIT"
] | null | null | null | python/XML2_FindTheMaximumDepth.py | HannoFlohr/hackerrank | 9644c78ce05a6b1bc5d8f542966781d53e5366e3 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as etree
# Maximum element depth seen so far; updated (and accumulated) by depth().
maxdepth = 0


def depth(elem, level):
    """Recursively record the depth of *elem* in the global ``maxdepth``.

    *level* is the depth of *elem*'s parent; pass -1 for the root so the
    root itself sits at depth 0.  Also returns the maximum depth seen so
    far (it accumulates across calls via the module-level ``maxdepth``),
    which is backward compatible with the previous None return.
    """
    global maxdepth
    level += 1
    if level > maxdepth:
        maxdepth = level
    for child in elem:
        depth(child, level)
    return maxdepth


if __name__ == '__main__':
    # Read n lines of XML from stdin and print the maximum nesting depth.
    n = int(input())
    xml = ""
    for i in range(n):
        xml = xml + input() + "\n"
    tree = etree.ElementTree(etree.fromstring(xml))
    depth(tree.getroot(), -1)
    print(maxdepth)
# https://www.hackerrank.com/challenges/xml2-find-the-maximum-depth/problem
d3473c63c6db0d6bea8d91cf3f531e9cd83870c1 | 2,426 | py | Python | python/tvm/__init__.py | mwillsey/incubator-tvm | e02dc69fef294eb73dd65d18949ed9e108f60cda | [
"Apache-2.0"
] | 2 | 2020-04-17T02:25:16.000Z | 2020-11-25T11:39:43.000Z | python/tvm/__init__.py | mwillsey/incubator-tvm | e02dc69fef294eb73dd65d18949ed9e108f60cda | [
"Apache-2.0"
] | 3 | 2020-04-20T15:37:55.000Z | 2020-05-13T05:34:28.000Z | python/tvm/__init__.py | mwillsey/incubator-tvm | e02dc69fef294eb73dd65d18949ed9e108f60cda | [
"Apache-2.0"
] | 1 | 2020-04-08T07:08:04.000Z | 2020-04-08T07:08:04.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin, wildcard-import
"""TVM: Open Deep Learning Compiler Stack."""
import multiprocessing
import sys
import traceback
# top-level alias
# tvm._ffi
from ._ffi.base import TVMError, __version__
from ._ffi.runtime_ctypes import DataTypeCode, DataType
from ._ffi import register_object, register_func, register_extension, get_global_func
# top-level alias
# tvm.runtime
from .runtime.object import Object
from .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
from .runtime.ndarray import vpi, rocm, ext_dev, micro_dev, hexagon
from .runtime import ndarray as nd
# tvm.error
from . import error
# tvm.ir
from .ir import IRModule
from .ir import transform
from .ir import container
from . import ir
# tvm.tir
from . import tir
# tvm.target
from . import target
# tvm.te
from . import te
# tvm.driver
from .driver import build, lower
# tvm.parser
from . import parser
# tvm tir hybrid script
from . import hybrid
# others
from . import arith
# support infra
from . import support
# Contrib initializers
from .contrib import rocm as _rocm, nvcc as _nvcc, sdaccel as _sdaccel
def tvm_wrap_excepthook(exception_hook):
    """Return *exception_hook* wrapped with TVM-specific cleanup work."""

    def wrapper(exctype, value, trbk):
        """Invoke the original hook, then terminate any child processes."""
        exception_hook(exctype, value, trbk)
        # Guard: stripped-down interpreters may lack active_children.
        if hasattr(multiprocessing, "active_children"):
            # pylint: disable=not-callable
            children = multiprocessing.active_children()
            for child in children:
                child.terminate()

    return wrapper


sys.excepthook = tvm_wrap_excepthook(sys.excepthook)
| 27.568182 | 85 | 0.749382 |
ca433895877ffecec9f08c8c5595fe2fd9e46b49 | 902 | py | Python | run_video_feature_from_clips.py | MYusha/video-classification-3d-cnn-pytorch | 12e317c65df5306235da6bf2e0d872babbe5cf65 | [
"MIT"
] | null | null | null | run_video_feature_from_clips.py | MYusha/video-classification-3d-cnn-pytorch | 12e317c65df5306235da6bf2e0d872babbe5cf65 | [
"MIT"
] | null | null | null | run_video_feature_from_clips.py | MYusha/video-classification-3d-cnn-pytorch | 12e317c65df5306235da6bf2e0d872babbe5cf65 | [
"MIT"
] | null | null | null | import os
import sys
import json
import pdb
import subprocess
import numpy as np
if __name__ == '__main__':
    # Usage: python run_video_feature_from_clips.py <result_json> <feature_root_path>
    # Averages per-clip feature vectors into one mean feature per video.
    result_json = sys.argv[1]
    feature_root_path = sys.argv[2]
    with open(os.path.join(feature_root_path, result_json), 'r') as f:
        data = json.load(f)
    n_vids = len(data)
    print('number of total video features included is', n_vids)
    for clip_data in data:
        # clip_data groups the features of all clips from the same video;
        # each entry of clip_data['clips'] has "segment" and "features" keys.
        vid_feature = []
        for clip in clip_data['clips']:
            vid_feature.append(clip['features'])
        vid_feature = np.asarray(vid_feature)
        mean_feature = np.mean(vid_feature, axis=0)
        # Fix: was the Python 2 statement `print mean_feature.shape`,
        # a SyntaxError under Python 3 (the rest of the script already
        # uses the print() function). Also dropped unused locals.
        print(mean_feature.shape)
| 30.066667 | 69 | 0.646341 |
51187c06b15c9de86c351bbf23684aa932e3405e | 390 | py | Python | log.py | pedro-gutierrez/opentact-demo | 80a9a915973041369e23b6cd6550c66bd1f0dc39 | [
"MIT"
] | null | null | null | log.py | pedro-gutierrez/opentact-demo | 80a9a915973041369e23b6cd6550c66bd1f0dc39 | [
"MIT"
] | null | null | null | log.py | pedro-gutierrez/opentact-demo | 80a9a915973041369e23b6cd6550c66bd1f0dc39 | [
"MIT"
] | null | null | null | from sanic import Sanic
from sanic.config import LOGGING
import logging
LOGGING['loggers']={}
LOGGING['handlers']={}
logging_format = "[%(asctime)s] %(process)d-%(levelname)s "
logging_format += "%(module)s::%(funcName)s():l%(lineno)d: "
logging_format += "%(message)s"
logging.basicConfig(
format=logging_format,
level=logging.DEBUG
)
def log():
return logging.getLogger()
| 20.526316 | 60 | 0.7 |
3e47ae6971a97a9ac8fc48a88c6dd7d82e2fb4b7 | 52,558 | py | Python | clinica/iotools/utils/data_handling.py | ravih18/clinica | 07dfe5ba3bab5852a220dba2c88ab0c5132ef26e | [
"MIT"
] | null | null | null | clinica/iotools/utils/data_handling.py | ravih18/clinica | 07dfe5ba3bab5852a220dba2c88ab0c5132ef26e | [
"MIT"
] | 2 | 2020-04-10T16:53:03.000Z | 2020-04-14T20:05:54.000Z | clinica/iotools/utils/data_handling.py | mdiazmel/clinica | f4d4b368a7fea5815d0c412d5f7870913b6395d7 | [
"MIT"
] | null | null | null | """Data handling scripts."""
import click
def compute_default_filename(out_path):
    """Normalize *out_path* into a ``.tsv`` file path.

    A directory gets a default ``merge.tsv`` appended, an extension-less
    path gets ``.tsv`` appended, and any extension other than ``.tsv``
    raises ``TypeError``.
    """
    from os import path

    resolved = path.abspath(out_path)
    if path.isdir(resolved):
        # A directory was given: store the merge under a default filename.
        return path.join(out_path, "merge.tsv")
    if "." not in path.basename(resolved):
        # No extension given: append the expected one.
        return f"{out_path}.tsv"
    if path.splitext(out_path)[1] != ".tsv":
        raise TypeError("Output path extension must be tsv.")
    return out_path
def create_merge_file(
    bids_dir,
    out_tsv,
    caps_dir=None,
    tsv_file=None,
    pipelines=None,
    ignore_scan_files=False,
    ignore_sessions_files=False,
    **kwargs,
):
    """Merge all the TSV files containing clinical data of a BIDS compliant dataset and store the result inside a TSV file.
    Args:
        bids_dir: path to the BIDS folder
        out_tsv: path to the output tsv file
        caps_dir: path to the CAPS folder (optional)
        tsv_file: TSV file containing the subjects with their sessions (optional)
        ignore_scan_files: If True the information related to scans is not read (optional)
        ignore_sessions_files: If True the information related to sessions and scans is not read (optional)
        pipelines: when adding CAPS information, indicates the pipelines that will be merged (optional)
    """
    import json
    import os
    from os import path
    import numpy as np
    import pandas as pd
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.stream import cprint
    from .pipeline_handling import DatasetError
    if caps_dir is not None:
        if not path.isdir(caps_dir):
            raise IOError("The path to the CAPS directory is wrong")
    if not os.path.isfile(path.join(bids_dir, "participants.tsv")):
        raise IOError("participants.tsv not found in the specified BIDS directory")
    participants_df = pd.read_csv(path.join(bids_dir, "participants.tsv"), sep="\t")
    # Restrict the merge to the requested subjects/sessions (all by default).
    sessions, subjects = get_subject_session_list(
        bids_dir, ss_file=tsv_file, use_session_tsv=True
    )
    sub_ses_df = pd.DataFrame(
        [[subject, session] for subject, session in zip(subjects, sessions)],
        columns=["participant_id", "session_id"],
    )
    sub_ses_df.set_index(["participant_id", "session_id"], inplace=True)
    out_path = compute_default_filename(out_tsv)
    out_dir = path.dirname(out_path)
    if len(out_dir) > 0:
        os.makedirs(out_dir, exist_ok=True)
    merged_df = pd.DataFrame(columns=participants_df.columns.values)
    # BIDS part
    for subject, subject_df in sub_ses_df.groupby(level=0):
        sub_path = path.join(bids_dir, subject)
        row_participant_df = participants_df[
            participants_df["participant_id"] == subject
        ]
        row_participant_df.reset_index(inplace=True, drop=True)
        if len(row_participant_df) == 0:
            # Keep going with a stub row so one missing entry does not
            # abort the whole merge.
            cprint(
                msg=f"Participant {subject} does not exist in participants.tsv",
                lvl="warning",
            )
            row_participant_df = pd.DataFrame([[subject]], columns=["participant_id"])
        if ignore_sessions_files:
            for _, session in subject_df.index.values:
                row_session_df = pd.DataFrame([[session]], columns=["session_id"])
                row_df = pd.concat([row_participant_df, row_session_df], axis=1)
                merged_df = merged_df.append(row_df)
        else:
            sessions_df = pd.read_csv(
                path.join(sub_path, f"{subject}_sessions.tsv"), sep="\t"
            )
            for _, session in subject_df.index.values:
                row_session_df = sessions_df[sessions_df.session_id == session]
                row_session_df.reset_index(inplace=True, drop=True)
                if len(row_session_df) == 0:
                    raise DatasetError(
                        sessions_df.loc[0, "session_id"] + " / " + session
                    )
                # Read scans TSV files
                scan_path = path.join(
                    bids_dir,
                    subject,
                    session,
                    f"{subject}_{session}_scans.tsv",
                )
                if path.isfile(scan_path) and not ignore_scan_files:
                    scans_dict = dict()
                    scans_df = pd.read_csv(scan_path, sep="\t")
                    for idx in scans_df.index.values:
                        filepath = scans_df.loc[idx, "filename"]
                        if filepath.endswith(".nii.gz"):
                            filename = path.basename(filepath).split(".")[0]
                            modality = "_".join(filename.split("_")[2::])
                            # Prefix every scans.tsv column with the modality
                            # so different scans do not collide.
                            for col in scans_df.columns.values:
                                if col == "filename":
                                    pass
                                else:
                                    value = scans_df.loc[idx, col]
                                    new_col_name = f"{modality}_{col}"
                                    scans_dict.update({new_col_name: value})
                            # Merge the JSON sidecar metadata with the same
                            # modality prefix.
                            json_path = path.join(
                                bids_dir,
                                subject,
                                session,
                                filepath.split(".")[0] + ".json",
                            )
                            if path.exists(json_path):
                                with open(json_path, "r") as f:
                                    json_dict = json.load(f)
                                for key, value in json_dict.items():
                                    new_col_name = f"{modality}_{key}"
                                    scans_dict.update({new_col_name: value})
                    # Stringify everything so heterogeneous metadata fits
                    # in a single DataFrame row.
                    scans_dict = {
                        str(key): str(value) for key, value in scans_dict.items()
                    }
                    row_scans_df = pd.DataFrame(scans_dict, index=[0])
                else:
                    row_scans_df = pd.DataFrame()
                row_df = pd.concat(
                    [row_participant_df, row_session_df, row_scans_df], axis=1
                )
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # switch to pd.concat when upgrading pandas.
                merged_df = merged_df.append(row_df)
    # Put participant_id and session_id first
    col_list = merged_df.columns.values.tolist()
    col_list.insert(0, col_list.pop(col_list.index("participant_id")))
    col_list.insert(1, col_list.pop(col_list.index("session_id")))
    merged_df = merged_df[col_list]
    tmp = merged_df.select_dtypes(include=[np.number])
    # Round numeric values in dataframe to 6 decimal values
    merged_df.loc[:, tmp.columns] = np.round(tmp, 6)
    merged_df.to_csv(out_path, sep="\t", index=False)
    cprint("End of BIDS information merge.", lvl="debug")
    merged_df.reset_index(drop=True, inplace=True)
    # CAPS
    if caps_dir is not None:
        # Call the different pipelines
        from .pipeline_handling import (
            pet_volume_pipeline,
            t1_freesurfer_pipeline,
            t1_volume_pipeline,
        )
        pipeline_options = {
            "t1-volume": t1_volume_pipeline,
            "pet-volume": pet_volume_pipeline,
            "t1-freesurfer": t1_freesurfer_pipeline,
        }
        merged_summary_df = pd.DataFrame()
        if not pipelines:
            # No explicit selection: try every known pipeline and keep
            # whichever produced outputs.
            for pipeline_name, pipeline_fn in pipeline_options.items():
                merged_df, summary_df = pipeline_fn(caps_dir, merged_df, **kwargs)
                if summary_df is not None and not summary_df.empty:
                    merged_summary_df = pd.concat([merged_summary_df, summary_df])
                if summary_df is None or summary_df.empty:
                    cprint(
                        f"{pipeline_name} outputs were not found in the CAPS folder."
                    )
        else:
            for pipeline in pipelines:
                merged_df, summary_df = pipeline_options[pipeline](
                    caps_dir, merged_df, **kwargs
                )
                merged_summary_df = pd.concat([merged_summary_df, summary_df])
        n_atlas = len(merged_summary_df)
        if n_atlas == 0:
            raise FileNotFoundError(
                "No outputs were found for any pipeline in the CAPS folder. "
                "The output only contains BIDS information."
            )
        # Record, for each atlas, the span of columns it occupies in the
        # merged table (written to the *_summary.tsv side file).
        columns = merged_df.columns.values.tolist()
        merged_summary_df.reset_index(inplace=True, drop=True)
        for idx in merged_summary_df.index:
            first_column_name = merged_summary_df.loc[idx, "first_column_name"]
            last_column_name = merged_summary_df.loc[idx, "last_column_name"]
            merged_summary_df.loc[idx, "first_column_index"] = columns.index(
                first_column_name
            )
            merged_summary_df.loc[idx, "last_column_index"] = columns.index(
                last_column_name
            )
        summary_path = path.splitext(out_path)[0] + "_summary.tsv"
        merged_summary_df.to_csv(summary_path, sep="\t", index=False)
        tmp = merged_df.select_dtypes(include=[np.number])
        # Round numeric values in dataframe to 12 floating point values
        merged_df.loc[:, tmp.columns] = np.round(tmp, 12)
        merged_df.to_csv(out_path, sep="\t")
        cprint("End of CAPS information merge.", lvl="debug")
def find_mods_and_sess(bids_dir):
    """Find all the modalities and sessions available for a given BIDS dataset.

    Args:
        bids_dir: path to the BIDS dataset

    Returns:
        mods_dict: a dictionary that stores the sessions and modalities found
        and has the following structure::

            {
                'sessions': ['ses-M00', 'ses-M18'],
                'fmap': ['fmap'],
                'anat': ['flair', 't1w'],
                'func': ['func_task-rest'],
                'dwi': ['dwi']
            }
    """
    import os
    from glob import glob
    from os import path

    mods_dict = {}
    mods_list = []
    subjects_paths_lists = glob(path.join(bids_dir, "*sub-*"))
    for sub_path in subjects_paths_lists:
        ses_paths = glob(path.join(sub_path, "*ses-*"))
        for session in ses_paths:
            ses_name = session.split(os.sep)[-1]
            mods_avail = []
            # Record each session label once.
            if "sessions" in mods_dict:
                if ses_name not in mods_dict["sessions"]:
                    mods_dict["sessions"].append(ses_name)
            else:
                mods_dict.update({"sessions": [ses_name]})
            # Modality sub-folders present in this session (anat, func, ...).
            mods_paths_folders = glob(path.join(session, "*/"))
            for p in mods_paths_folders:
                p = p[:-1]
                mods_avail.append(p.split("/").pop())
            if "func" in mods_avail:
                # One entry per task: 'func_task-<label>'.
                list_funcs_paths = glob(path.join(session, "func", "*bold.nii.gz"))
                for func_path in list_funcs_paths:
                    func_name = func_path.split(os.sep)[-1]
                    func_name_tokens = func_name.split("_")
                    func_task = func_name_tokens[2]
                    if "func" in mods_dict:
                        if "func_" + func_task not in mods_dict["func"]:
                            mods_dict["func"].append("func_" + func_task)
                    else:
                        mods_dict.update({"func": ["func_" + func_task]})
                    if "func_" + func_task not in mods_list:
                        mods_list.append("func_" + func_task)
            if "dwi" in mods_avail:
                if "dwi" not in mods_dict:
                    mods_dict.update({"dwi": ["dwi"]})
                if "dwi" not in mods_list:
                    mods_list.append("dwi")
            if "fmap" in mods_avail:
                if "fmap" not in mods_dict:
                    mods_dict.update({"fmap": ["fmap"]})
                if "fmap" not in mods_list:
                    mods_list.append("fmap")
            if "pet" in mods_avail:
                # One entry per tracer/acquisition: 'pet_<acq-label>'.
                list_pet_paths = glob(path.join(session, "pet", "*pet.nii.gz"))
                for pet_path in list_pet_paths:
                    pet_name = pet_path.split(os.sep)[-1].split(".")[0]
                    pet_name_tokens = pet_name.split("_")
                    pet_acq = pet_name_tokens[3]
                    if "pet" in mods_dict:
                        if "pet_" + pet_acq not in mods_dict["pet"]:
                            mods_dict["pet"].append("pet_" + pet_acq)
                    else:
                        mods_dict.update({"pet": ["pet_" + pet_acq]})
                    if "pet_" + pet_acq not in mods_list:
                        mods_list.append("pet_" + pet_acq)
            if "anat" in mods_avail:
                anat_files_paths = glob(path.join(session, "anat", "*"))
                for anat_file in anat_files_paths:
                    anat_name = anat_file.split(os.sep)[-1]
                    # Strip the extension, keeping it so JSON sidecars can be
                    # skipped below.
                    if anat_name.endswith(".nii.gz"):
                        anat_name = anat_name[: -len(".nii.gz")]
                        anat_ext = "nii.gz"
                    else:
                        # Bug fix: the extension must be computed BEFORE it is
                        # stripped off; the previous code re-split the already
                        # stripped name, so anat_ext was always empty and
                        # .json sidecar files were wrongly counted as
                        # anatomical modalities.
                        anat_name, ext = os.path.splitext(anat_name)
                        anat_ext = ext.lstrip(".")
                    if anat_ext != "json":
                        # The modality suffix is the last '_'-separated token
                        # of the BIDS filename (e.g. 'T1w' -> 't1w').
                        file_parts = anat_name.split("_")
                        anat_type = str.lower(file_parts[len(file_parts) - 1])
                        if "anat" in mods_dict:
                            if anat_type not in mods_dict["anat"]:
                                anat_aval = mods_dict["anat"]
                                anat_aval.append(anat_type)
                                mods_dict.update({"anat": anat_aval})
                        else:
                            mods_dict.update({"anat": [anat_type]})
                        if anat_type not in mods_list:
                            mods_list.append(anat_type)
    return mods_dict
def compute_missing_processing(bids_dir, caps_dir, out_file):
    """
    Compute the list of missing processing for each subject in a CAPS compliant dataset

    Writes one TSV row per (participant, session); every pipeline flag is
    stored as the string "1" (outputs found) or "0" (missing).

    Args:
        bids_dir: path to the BIDS directory.
        caps_dir: path to the CAPS directory.
        out_file: path to the output file (filename included).
    """
    from glob import glob
    from os import listdir, path, sep
    import pandas as pd
    if path.exists(path.join(caps_dir, "groups")):
        groups = listdir(path.join(caps_dir, "groups"))
    else:
        groups = list()
    output_df = pd.DataFrame()
    # Retrieve pet tracers avail
    mods_and_sess = find_mods_and_sess(bids_dir)
    mods_and_sess.pop("sessions")
    mods_avail_dict = mods_and_sess
    trc_avail = [
        j.split("_")[1] for i in mods_avail_dict.values() for j in i if "pet" in j
    ]
    # NOTE(review): looks like a debug leftover — consider removing it or
    # routing it through clinica's cprint like the rest of the module.
    print(trc_avail)
    subjects_paths = glob(path.join(caps_dir, "subjects", "sub-*"))
    for subject_path in subjects_paths:
        participant_id = subject_path.split(sep)[-1]
        sessions_paths = glob(path.join(subject_path, "ses-*"))
        for session_path in sessions_paths:
            session_id = session_path.split(sep)[-1]
            row_df = pd.DataFrame(
                [[participant_id, session_id]], columns=["participant_id", "session_id"]
            )
            # Check t1-volume outputs
            if path.exists(path.join(session_path, "t1", "spm", "segmentation")):
                row_df.loc[0, "t1-volume-segmentation"] = "1"
                # Dartel steps are checked per group; each later step is only
                # possible when the previous one succeeded.
                for group in groups:
                    group_id = group.split("-")[-1]
                    if path.exists(
                        path.join(
                            session_path,
                            "t1",
                            "spm",
                            "dartel",
                            group,
                            f"{participant_id}_{session_id}_T1w_target-{group_id}_transformation-forward_deformation.nii.gz",
                        )
                    ):
                        row_df.loc[0, f"t1-volume-register-dartel_{group}"] = "1"
                        dartel2mni = glob(
                            path.join(
                                session_path,
                                "t1",
                                "spm",
                                "dartel",
                                group,
                                f"{participant_id}_{session_id}_T1w_segm-*_space-Ixi549Space_modulated-*_probability.nii.gz",
                            )
                        )
                        if len(dartel2mni) > 0:
                            row_df.loc[0, f"t1-volume-dartel2mni_{group}"] = "1"
                            if path.exists(
                                path.join(
                                    session_path,
                                    "t1",
                                    "spm",
                                    "dartel",
                                    group,
                                    "atlas_statistics",
                                )
                            ):
                                row_df.loc[0, f"t1-volume-parcellation_{group}"] = "1"
                            else:
                                row_df.loc[0, f"t1-volume-parcellation_{group}"] = "0"
                        else:
                            row_df.loc[0, f"t1-volume-dartel2mni_{group}"] = "0"
                            row_df.loc[0, f"t1-volume-parcellation_{group}"] = "0"
                    else:
                        row_df.loc[0, f"t1-volume-register-dartel_{group}"] = "0"
                        row_df.loc[0, f"t1-volume-dartel2mni_{group}"] = "0"
                        row_df.loc[0, f"t1-volume-parcellation_{group}"] = "0"
            else:
                # No segmentation means every downstream t1-volume step is missing.
                row_df.loc[0, "t1-volume-segmentation"] = "0"
                for group in groups:
                    row_df.loc[0, f"t1-volume-register-dartel_{group}"] = "0"
                    row_df.loc[0, f"t1-volume-dartel2mni_{group}"] = "0"
                    row_df.loc[0, f"t1-volume-parcellation_{group}"] = "0"
            # Check t1-linear outputs
            if path.exists(path.join(session_path, "t1_linear")):
                row_df.loc[0, "t1-linear"] = "1"
            else:
                row_df.loc[0, "t1-linear"] = "0"
            # Check t1-freesurfer outputs
            if path.exists(path.join(session_path, "t1", "freesurfer_cross_sectional")):
                row_df.loc[0, "t1-freesurfer"] = "1"
            else:
                row_df.loc[0, "t1-freesurfer"] = "0"
            # Check pet-volume outputs
            for group in groups:
                for trc in trc_avail:
                    for pvc in [True, False]:
                        # Outputs with/without partial-volume correction are
                        # told apart by the presence of 'pvc' in the filename.
                        pet_pattern = path.join(
                            session_path, "pet", "preprocessing", group, f"*{trc}*"
                        )
                        pet_paths = glob(pet_pattern)
                        if pvc:
                            pet_paths = [
                                pet_path for pet_path in pet_paths if "pvc" in pet_path
                            ]
                        else:
                            pet_paths = [
                                pet_path
                                for pet_path in pet_paths
                                if "pvc" not in pet_path
                            ]
                        if len(pet_paths) > 0:
                            row_df.loc[0, f"pet-volume_{trc}_{group}_pvc-{pvc}"] = "1"
                        else:
                            row_df.loc[0, f"pet-volume_{trc}_{group}_pvc-{pvc}"] = "0"
            # Check pet-surface outputs
            for trc in trc_avail:
                pet_pattern = path.join(session_path, "pet", "surface", f"*{trc}*")
                if len(glob(pet_pattern)) > 0:
                    row_df.loc[0, f"pet-surface_{trc}"] = "1"
                else:
                    row_df.loc[0, f"pet-surface_{trc}"] = "0"
            output_df = pd.concat([output_df, row_df])
    output_df.sort_values(["participant_id", "session_id"], inplace=True)
    output_df.to_csv(out_file, sep="\t", index=False)
def compute_missing_mods(bids_dir, out_dir, output_prefix=""):
    """Compute the list of missing modalities for each subject in a BIDS compliant dataset.

    One ``<prefix>_<session>.tsv`` file is written per session, with a "1"/"0"
    flag per modality and subject, plus a summary and a longitudinal analysis
    text file.

    Args:
        bids_dir: path to the BIDS directory
        out_dir: path to the output folder
        output_prefix: string that replace the default prefix ('missing_mods_') in the name of all the output files
        created
    """
    import os
    from glob import glob
    from os import path
    import pandas as pd
    from ..converter_utils import (
        MissingModsTracker,
        print_longitudinal_analysis,
        print_statistics,
    )
    os.makedirs(out_dir, exist_ok=True)
    # Find all the modalities and sessions available for the input dataset
    mods_and_sess = find_mods_and_sess(bids_dir)
    sessions_found = mods_and_sess["sessions"]
    mods_and_sess.pop("sessions")
    mods_avail_dict = mods_and_sess
    # Flatten the per-category modality lists into the TSV column order.
    mods_avail = [j for i in mods_avail_dict.values() for j in i]
    cols_dataframe = mods_avail[:]
    cols_dataframe.insert(0, "participant_id")
    mmt = MissingModsTracker(sessions_found, mods_avail)
    if output_prefix == "":
        out_file_name = "missing_mods_"
    else:
        out_file_name = output_prefix + "_"
    summary_file = open(path.join(out_dir, out_file_name + "summary.txt"), "w")
    analysis_file = open(path.join(out_dir, "analysis.txt"), "w")
    missing_mods_df = pd.DataFrame(columns=cols_dataframe)
    row_to_append_df = pd.DataFrame(columns=cols_dataframe)
    subjects_paths_lists = glob(path.join(bids_dir, "*sub-*"))
    subjects_paths_lists.sort()
    if len(subjects_paths_lists) == 0:
        raise IOError("No subjects found or dataset not BIDS complaint.")
    # Check the modalities available for each session
    for ses in sessions_found:
        for sub_path in subjects_paths_lists:
            mods_avail_bids = []
            subj_id = sub_path.split(os.sep)[-1]
            row_to_append_df["participant_id"] = pd.Series(subj_id)
            ses_path_avail = glob(path.join(sub_path, ses))
            if len(ses_path_avail) == 0:
                # Whole session missing: flag every modality as absent.
                mmt.increase_missing_ses(ses)
                for mod in mods_avail:
                    row_to_append_df[mod] = pd.Series("0")
            else:
                ses_path = ses_path_avail[0]
                mods_paths_folders = glob(path.join(ses_path, "*/"))
                for p in mods_paths_folders:
                    p = p[:-1]
                    mods_avail_bids.append(p.split("/").pop())
                # Check if a modality folder is available and if is empty
                if "func" in mods_avail_bids:
                    # Extract all the task available
                    for m in mods_avail_dict["func"]:
                        tokens = m.split("_")
                        task_name = tokens[1]
                        task_avail_list = glob(
                            path.join(ses_path, "func", "*" + task_name + "*")
                        )
                        if len(task_avail_list) == 0:
                            row_to_append_df[m] = pd.Series("0")
                        else:
                            row_to_append_df[m] = pd.Series("1")
                # If the folder is not available but the modality is
                # in the list of the available one mark it as missing
                else:
                    if "func" in mods_avail_dict:
                        for m in mods_avail_dict["func"]:
                            row_to_append_df[m] = pd.Series("0")
                            mmt.add_missing_mod(ses, m)
                if "dwi" in mods_avail_bids:
                    row_to_append_df["dwi"] = pd.Series("1")
                else:
                    if "dwi" in mods_avail:
                        row_to_append_df["dwi"] = pd.Series("0")
                        mmt.add_missing_mod(ses, "dwi")
                if "anat" in mods_avail_bids:
                    for m in mods_avail_dict["anat"]:
                        anat_aval_list = glob(path.join(ses_path, "anat", "*.nii.gz"))
                        anat_aval_list = [
                            elem for elem in anat_aval_list if m.lower() in elem.lower()
                        ]
                        if len(anat_aval_list) > 0:
                            row_to_append_df[m] = pd.Series("1")
                        else:
                            row_to_append_df[m] = pd.Series("0")
                            mmt.add_missing_mod(ses, m)
                else:
                    if "anat" in mods_avail_dict:
                        for m in mods_avail_dict["anat"]:
                            row_to_append_df[m] = pd.Series("0")
                            mmt.add_missing_mod(ses, m)
                if "fmap" in mods_avail_bids:
                    row_to_append_df["fmap"] = pd.Series("1")
                else:
                    if "fmap" in mods_avail:
                        row_to_append_df["fmap"] = pd.Series("0")
                        mmt.add_missing_mod(ses, "fmap")
                if "pet" in mods_avail_bids:
                    # Extract all the task available
                    for m in mods_avail_dict["pet"]:
                        tokens = m.split("_")
                        pet_acq = tokens[1]
                        acq_avail_list = glob(
                            path.join(ses_path, "pet", "*" + pet_acq + "*")
                        )
                        if len(acq_avail_list) == 0:
                            row_to_append_df[m] = pd.Series("0")
                        else:
                            row_to_append_df[m] = pd.Series("1")
                # If the folder is not available but the modality is
                # in the list of the available one mark it as missing
                else:
                    if "pet" in mods_avail_dict:
                        for m in mods_avail_dict["pet"]:
                            row_to_append_df[m] = pd.Series("0")
                            mmt.add_missing_mod(ses, m)
            missing_mods_df = missing_mods_df.append(row_to_append_df)
            row_to_append_df = pd.DataFrame(columns=cols_dataframe)
        # One TSV per session, then reset the accumulator for the next one.
        missing_mods_df = missing_mods_df[cols_dataframe]
        missing_mods_df.to_csv(
            path.join(out_dir, out_file_name + ses + ".tsv"),
            sep="\t",
            index=False,
            encoding="utf-8",
        )
        missing_mods_df = pd.DataFrame(columns=cols_dataframe)
    print_statistics(summary_file, len(subjects_paths_lists), sessions_found, mmt)
    print_longitudinal_analysis(
        analysis_file, bids_dir, out_dir, sessions_found, out_file_name
    )
def create_subs_sess_list(
    input_dir, output_dir, file_name=None, is_bids_dir=True, use_session_tsv=False
):
    """Create the file subject_session_list.tsv that contains the list of the visits for each subject for a BIDS or CAPS compliant dataset.

    Args:
        input_dir (str): Path to the BIDS or CAPS directory.
        output_dir (str): Path to the output directory.
        file_name (str): Name of the output file
            (default: "subjects_sessions_list.tsv").
        is_bids_dir (bool): Specify if input_dir is a BIDS directory or
            not (i.e. a CAPS directory).
        use_session_tsv (bool): Specify if the list uses the sessions listed in the sessions.tsv files.

    Raises:
        IOError: If no subject folder is found in the input directory.
    """
    import os
    from glob import glob
    from os import path

    import pandas as pd

    os.makedirs(output_dir, exist_ok=True)

    if not file_name:
        file_name = "subjects_sessions_list.tsv"

    # In a BIDS directory the subjects live at the root; in a CAPS directory
    # they live under the "subjects" folder.
    if is_bids_dir:
        path_to_search = input_dir
    else:
        path_to_search = path.join(input_dir, "subjects")
    subjects_paths = sorted(glob(path.join(path_to_search, "*sub-*")))

    # Use a context manager so the output file is always closed, even when the
    # dataset turns out to be empty and the IOError below is raised (the
    # previous implementation leaked the file handle in that case).
    with open(path.join(output_dir, file_name), "w") as subjs_sess_tsv:
        subjs_sess_tsv.write("participant_id" + "\t" + "session_id" + "\n")

        if len(subjects_paths) == 0:
            raise IOError("Dataset empty or not BIDS/CAPS compliant.")

        for sub_path in subjects_paths:
            subj_id = sub_path.split(os.sep)[-1]

            if use_session_tsv:
                # Sessions are read from the per-subject <subj_id>_sessions.tsv file.
                session_df = pd.read_csv(
                    path.join(sub_path, subj_id + "_sessions.tsv"), sep="\t"
                )
                session_list = list(session_df["session_id"].to_numpy())
                for session in session_list:
                    subjs_sess_tsv.write(subj_id + "\t" + session + "\n")
            else:
                # Sessions are inferred from the "ses-*" sub-folders.
                sess_list = glob(path.join(sub_path, "*ses-*"))
                for ses_path in sess_list:
                    session_name = ses_path.split(os.sep)[-1]
                    subjs_sess_tsv.write(subj_id + "\t" + session_name + "\n")
def center_nifti_origin(input_image, output_image):
    """Put the origin of the coordinate system at the center of the image.

    Args:
        input_image: path to the input image
        output_image: path to the output image (where the result will be stored)

    Returns:
        (output_image, error_str) tuple: the output path, and a human-readable
        error message if any step failed (None on success).
    """
    import os
    from os.path import isfile

    import nibabel as nib
    import numpy as np
    from nibabel.spatialimages import ImageFileError

    error_str = None
    try:
        img = nib.load(input_image)
    except FileNotFoundError:
        error_str = f"No such file {input_image}"
    except ImageFileError:
        error_str = f"File {input_image} could not be read"
    except Exception as e:
        error_str = f"File {input_image} could not be loaded with nibabel: {e}"

    if not error_str:
        try:
            canonical_img = nib.as_closest_canonical(img)
            hd = canonical_img.header
            # Build a diagonal affine that keeps the voxel sizes but places
            # the world-coordinate origin at the center of the volume.
            # NOTE(review): qform[3, 3] is left at 0 as in the original code;
            # only the first three rows of the affine are stored in a NIfTI
            # file, so this does not affect the saved image.
            qform = np.zeros((4, 4))
            for i in range(1, 4):
                qform[i - 1, i - 1] = hd["pixdim"][i]
                qform[i - 1, 3] = -1.0 * hd["pixdim"][i] * hd["dim"][i] / 2.0
            # np.asanyarray(img.dataobj) is the documented replacement for the
            # deprecated get_data() accessor, which was removed in nibabel >= 5;
            # it keeps the same array semantics without the caching machinery.
            new_img = nib.Nifti1Image(
                np.asanyarray(canonical_img.dataobj), affine=qform, header=hd
            )

            # Without deleting already-existing file, nib.save causes a severe bug on Linux system
            if isfile(output_image):
                os.remove(output_image)

            nib.save(new_img, output_image)
            if not isfile(output_image):
                error_str = (
                    f"NIfTI file created but Clinica could not save it to {output_image}. "
                    "Please check that the output folder has the correct permissions."
                )
        except Exception as e:
            error_str = (
                "File "
                + input_image
                + " could not be processed with nibabel: "
                + str(e)
            )
    return output_image, error_str
def center_all_nifti(bids_dir, output_dir, modality, center_all_files=False):
    """Center all the NIfTI images of the input BIDS folder into the empty output_dir specified in argument.

    All the files from bids_dir are copied into output_dir, then all the NIfTI images we can found are replaced by their
    centered version if their center if off the origin by more than 50 mm.

    Args:
        bids_dir: (str) path to bids directory
        output_dir: (str) path to EMPTY output directory
        modality: (list of str) modalities to convert
        center_all_files: (bool) center only files that may cause problem for SPM if false. If true, center all NIfTI

    Returns:
        List of the centered files

    Raises:
        ClinicaBIDSError: if bids_dir and output_dir are the same folder.
        RuntimeError: if at least one file could not be centered.
    """
    from glob import glob
    from os import listdir
    from os.path import basename, isdir, isfile, join
    from shutil import copy, copy2, copytree

    from clinica.utils.exceptions import ClinicaBIDSError
    from clinica.utils.inputs import check_bids_folder

    # output and input must be different, so that we do not mess with user's data
    if bids_dir == output_dir:
        raise ClinicaBIDSError("Input BIDS and output directories must be different")

    # check that input is a BIDS dir
    check_bids_folder(bids_dir)

    # Mirror the BIDS tree into output_dir (only entries not already present).
    for f in listdir(bids_dir):
        if isdir(join(bids_dir, f)) and not isdir(join(output_dir, f)):
            copytree(join(bids_dir, f), join(output_dir, f), copy_function=copy)
        elif isfile(join(bids_dir, f)) and not isfile(join(output_dir, f)):
            copy(join(bids_dir, f), output_dir)

    pattern = join(output_dir, "**/*.nii*")
    nifti_files = glob(pattern, recursive=True)

    # Keep only the files whose basename contains one of the requested
    # modality names (case-insensitive).
    nifti_files_filtered = [
        f
        for f in nifti_files
        if any(elem.lower() in basename(f).lower() for elem in modality)
    ]

    # Remove those who are already centered
    if not center_all_files:
        nifti_files_filtered = [
            file for file in nifti_files_filtered if not is_centered(file)
        ]

    all_errors = []
    for f in nifti_files_filtered:
        print(f"Handling {f}")
        # Center the copy in place (input and output are the same path).
        _, current_error = center_nifti_origin(f, f)
        if current_error:
            all_errors.append(current_error)
    if len(all_errors) > 0:
        # Typo fix: "encoutered" -> "encountered".
        final_error_msg = (
            f"Clinica encountered {len(all_errors)}"
            " error(s) while trying to center all NIfTI images.\n"
        )
        for error in all_errors:
            final_error_msg += "\n" + error
        raise RuntimeError(final_error_msg)
    return nifti_files_filtered
def are_far_appart(file1, file2, threshold=80):
    """Tell if 2 files have a center located at more than a threshold distance.

    Args:
        file1: (str) path to the first nifti file
        file2: (str) path to the second nifti file
        threshold: distance (in mm) above which the 2 volumes are considered far apart

    Returns:
        True if the distance between the centers of `file1` and `file2` is
        greater than `threshold`, False otherwise.

    Raises:
        FileNotFoundError: if one of the input paths does not point to a file.
    """
    from os.path import isfile

    import numpy as np

    # Explicit checks instead of assert: assertions are stripped when Python
    # runs with the -O flag, which would silently skip this validation.
    for filename in (file1, file2):
        if not isfile(filename):
            raise FileNotFoundError(f"No such file: {filename}")

    center1 = get_world_coordinate_of_center(file1)
    center2 = get_world_coordinate_of_center(file2)
    return np.linalg.norm(center2 - center1, ord=2) > threshold
def write_list_of_files(file_list, output_file):
    """Save `file_list` list of files into `output_file` text file.

    Args:
        file_list: (list of str) of path to files
        output_file: (str) path to the output txt file

    Returns:
        output_file, or None when `output_file` already exists (nothing is
        written in that case).

    Raises:
        TypeError: if the arguments do not have the expected types.
    """
    from os.path import isfile

    # Explicit checks instead of assert: assertions are stripped when Python
    # runs with the -O flag, which would silently skip this validation.
    if not isinstance(file_list, list):
        raise TypeError("First argument must be a list")
    if not isinstance(output_file, str):
        raise TypeError("Second argument must be a str")

    # Never overwrite an existing file; signal it by returning None.
    if isfile(output_file):
        return None

    # Context manager guarantees the file is closed even if a write fails.
    with open(output_file, "w") as text_file:
        for created_file in file_list:
            text_file.write(created_file + "\n")
    return output_file
def check_relative_volume_location_in_world_coordinate_system(
    label_1,
    nifti_list1,
    label_2,
    nifti_list2,
    bids_dir,
    modality,
    skip_question=False,
):
    """
    Check if the NIfTI file list nifti_list1 and nifti_list2 provided in argument are not too far apart (otherwise coreg
    in SPM may fail. Norm between center of volumes of 2 files must be less than 80 mm.

    Args:
        label_1: label of the first nifti_list1 files (used in potential warning message)
        nifti_list1: first set of files
        label_2: label of the second nifti_list
        nifti_list2: second set of files, must be same length as nifti_list1
        bids_dir: bids directory (used in potential warning message)
        modality: string that must be used in argument of: clinica iotools bids --modality MODALITY (used in potential
            warning message)
        skip_question: if True, skip the interactive confirmation prompt

    Side effects:
        May call sys.exit(0) when the user declines to continue.
    """
    import sys
    from os.path import abspath, basename

    # Bug fix: click is used at the bottom of this function (click.confirm /
    # click.echo) but was never imported here, causing a NameError whenever a
    # warning had to be confirmed.
    import click
    import numpy as np

    from clinica.utils.stream import cprint

    center_coordinate_1 = [get_world_coordinate_of_center(file) for file in nifti_list1]
    center_coordinate_2 = [get_world_coordinate_of_center(file) for file in nifti_list2]

    l2_norm = [
        np.linalg.norm(center_1 - center_2)
        for center_1, center_2 in zip(center_coordinate_1, center_coordinate_2)
    ]
    # Pairs whose centers are more than 80 mm apart are likely to break SPM.
    pairs_with_problems = [i for i, norm in enumerate(l2_norm) if norm > 80]

    if len(pairs_with_problems) > 0:
        warning_message = (
            f"It appears that {str(len(pairs_with_problems))} "
            "pairs of files have an important relative offset. "
            "SPM coregistration has a high probability to fail on these files:\n\n"
        )

        # File column width : 3 spaces more than the longest string to display
        file1_width = max(
            3 + len(label_1),
            3
            + max(
                len(basename(file))
                for file in [nifti_list1[k] for k in pairs_with_problems]
            ),
        )
        file2_width = max(
            3 + len(label_2),
            3
            + max(
                len(basename(file))
                for file in [nifti_list2[k] for k in pairs_with_problems]
            ),
        )
        norm_width = len("Relative distance")

        warning_message += (
            "%-"
            + str(file1_width)
            + "s%-"
            + str(file2_width)
            + "s%-"
            + str(norm_width)
            + "s"
        ) % (label_1, label_2, "Relative distance")
        warning_message += "\n" + "-" * (file1_width + file2_width + norm_width) + "\n"
        for file1, file2, norm in zip(
            [nifti_list1[k] for k in pairs_with_problems],
            [nifti_list2[k] for k in pairs_with_problems],
            [l2_norm[k] for k in pairs_with_problems],
        ):
            # Nice formatting as array
            # % escape character
            # - aligned to the left, with the size of the column
            # s = string, f = float
            # . for precision with float
            # https://docs.python.org/2/library/stdtypes.html#string-formatting for more information
            warning_message += (
                "%-"
                + str(file1_width)
                + "s%-"
                + str(file2_width)
                + "s%-"
                + str(norm_width)
                + ".2f\n"
            ) % (str(basename(file1)), str(basename(file2)), norm)

        warning_message += (
            "\nClinica provides a tool to counter this problem by replacing the center "
            "of the volume at the origin of the world coordinates.\nUse the following "
            "command line to correct the header of the faulty NIFTI volumes in a new folder:\n\n"
            f"`clinica iotools center-nifti {abspath(bids_dir)} {abspath(bids_dir)}_centered --modality {modality}`\n\n"
            "You will find more information on the command by typing `clinica iotools center-nifti` in the console."
        )
        cprint(msg=warning_message, lvl="warning")

        if not skip_question:
            if not click.confirm("Do you still want to launch the pipeline?"):
                click.echo("Clinica will now exit...")
                sys.exit(0)
def check_volume_location_in_world_coordinate_system(
    nifti_list, bids_dir, modality="t1w", skip_question=False
):
    """
    Check if the NIfTI file list nifti_list provided in argument are aproximately centered around the origin of the
    world coordinates. (Problem may arise with SPM segmentation

    If yes, we warn the user of this problem, and propose him to exit clinica in order for him to run:
        clinica iotools center-nifti ...
    or to continue with the execution of the pipeline

    Args:
        nifti_list: (list of str) list of path to nifti files
        bids_dir: (str) path to bids directory associated with this check (in order to propose directly the good
            command line for center-nifti tool)
        modality: (str) to propose directly the good command line option
        skip_question: (bool) if True user input is not asked for and the answer is automatically yes

    Side effects:
        May call sys.exit(0) when the user declines to continue.
    """
    import sys
    from os.path import abspath, basename

    import click
    import numpy as np

    list_non_centered_files = [file for file in nifti_list if not is_centered(file)]
    if len(list_non_centered_files) > 0:
        centers = [
            get_world_coordinate_of_center(file) for file in list_non_centered_files
        ]
        l2_norm = [np.linalg.norm(center, ord=2) for center in centers]

        # File column width : 3 spaces more than the longest string to display
        file_width = 3 + max(len(basename(file)) for file in list_non_centered_files)

        # Center column width (with a fixed minimum size) : 3 spaces more than the longest string to display
        center_width = max(
            len("Coordinate of center") + 3,
            3 + max(len(str(center)) for center in centers),
        )

        warning_message = (
            f"It appears that {str(len(list_non_centered_files))} files "
            "have a center way out of the origin of the world coordinate system. SPM has a high "
            "probability to fail on these files (for coregistration or segmentation):\n\n"
        )
        warning_message += (
            "%-" + str(file_width) + "s%-" + str(center_width) + "s%-s"
        ) % ("File", "Coordinate of center", "Distance to origin")
        # 18 is the length of the string 'Distance to origin'
        warning_message += "\n" + "-" * (file_width + center_width + 18) + "\n"
        for file, center, l2 in zip(list_non_centered_files, centers, l2_norm):
            # Nice formatting as array
            # % escape character
            # - aligned to the left, with the size of the column
            # s = string, f = float
            # . for precision with float
            # https://docs.python.org/2/library/stdtypes.html#string-formatting for more information
            warning_message += (
                "%-" + str(file_width) + "s%-" + str(center_width) + "s%-25.2f\n"
            ) % (basename(file), str(center), l2)

        cmd_line = f"`clinica iotools center-nifti {abspath(bids_dir)} {abspath(bids_dir)}_centered --modality {modality}`"

        warning_message += (
            "\nIf you are trying to launch the t1-freesurfer pipeline, you can ignore this message "
            "if you do not want to run the pet-surface pipeline afterward."
        )

        # Bug fix: a newline is added after {cmd_line}; previously the next
        # sentence was fused directly onto the end of the command line.
        warning_message += (
            "\nClinica provides a tool to counter this problem by replacing the center of the volume"
            " at the origin of the world coordinates.\nUse the following command line to correct the "
            f"header of the faulty NIFTI volumes in a new folder:\n{cmd_line}\n"
            "You will find more information on the command by typing "
            "clinica iotools center-nifti in the console."
        )

        click.echo(warning_message)

        if not skip_question:
            if not click.confirm("Do you still want to launch the pipeline?"):
                click.echo("Clinica will now exit...")
                sys.exit(0)
def is_centered(nii_volume, threshold_l2=50):
    """Tell if a NIfTI volume is centered on the origin of the world coordinate system.

    SPM struggles to segment files whose volume center is far from the origin of
    the world coordinate system. Empirically (by adding offsets through the
    affine entries [0, 3], [1, 3], [2, 3], equivalent to editing
    header['srow_x'][3], header['srow_y'][3], header['srow_z'][3]), SPM stops
    segmenting correctly once the L2 distance between the origin and the volume
    center exceeds roughly 100 mm: either it errors out, or the produced
    segmentation no longer looks like a brain.

    Args:
        nii_volume: path to NIfTI volume
        threshold_l2: maximum distance (in mm) between the origin of the world
            coordinate system and the center of the volume for the file to be
            considered centered. The default of 50 mm keeps a safety margin
            below the ~100 mm empirical limit, even for coregistered files.

    Returns:
        True if the volume is centered, False otherwise.
    """
    import numpy as np

    center = get_world_coordinate_of_center(nii_volume)

    # When the center could not be determined, `center` is np.nan; any
    # comparison involving NaN is False, so the volume is reported off-center.
    distance_from_origin = np.linalg.norm(center, ord=2)
    return bool(distance_from_origin < threshold_l2)
def get_world_coordinate_of_center(nii_volume):
    """Extract the world coordinates of the center of the image.

    Based on methods described here: https://brainder.org/2012/09/23/the-nifti-file-format/

    Args:
        nii_volume: path to nii volume

    Returns:
        Array with the 3 world coordinates of the center of the volume, or
        np.nan if the file could not be read by nibabel.
    """
    from os.path import isfile

    import nibabel as nib
    import numpy as np

    from clinica.utils.stream import cprint

    assert isinstance(nii_volume, str), "input argument nii_volume must be a str"
    assert isfile(nii_volume), "input argument must be a path to a file"

    try:
        orig_nifti = nib.load(nii_volume)
    except nib.filebasedimages.ImageFileError:
        # Unreadable file: warn and fall back to NaN so callers such as
        # is_centered() treat the volume as off-center.
        cprint(
            msg=f"File {nii_volume} could not be read by nibabel. Is it a valid NIfTI file ?",
            lvl="warning",
        )
        return np.nan

    head = orig_nifti.header

    if isinstance(head, nib.freesurfer.mghformat.MGHHeader):
        # If MGH volume: use the FreeSurfer-specific affine-based conversion.
        center_coordinates_world = vox_to_world_space_method_3_bis(
            head["dims"][0:3] / 2, head
        )
    else:
        # Standard NIfTI volume: pick the conversion method from the
        # qform/sform codes stored in the header.
        # NOTE(review): qform is checked before sform here, whereas NIfTI
        # readers usually give sform precedence -- confirm this is intended.
        center_coordinates = get_center_volume(head)

        if head["qform_code"] > 0:
            center_coordinates_world = vox_to_world_space_method_2(
                center_coordinates, head
            )
        elif head["sform_code"] > 0:
            center_coordinates_world = vox_to_world_space_method_3(
                center_coordinates, head
            )
        elif head["sform_code"] == 0:
            # Neither qform nor sform set: fall back to plain pixdim scaling.
            center_coordinates_world = vox_to_world_space_method_1(
                center_coordinates, head
            )
        else:
            # Unreachable in practice (sform_code is non-negative).
            center_coordinates_world = np.nan

    return center_coordinates_world
def get_center_volume(header):
    """Get the voxel coordinates of the center of the data, using header information.

    Args:
        header: a nifti header (its "dim" field holds the volume dimensions,
            with the x/y/z sizes at indices 1 to 3)

    Returns:
        Voxel coordinates of the center of the volume
    """
    import numpy as np

    # The center along each axis is simply half the dimension on that axis.
    return np.array([header["dim"][axis] / 2 for axis in (1, 2, 3)])
def vox_to_world_space_method_1(coordinates_vol, header):
    """
    The Method 1 is for compatibility with analyze and is not supposed to be used as the main orientation method. But it
    is used if sform_code = 0. The world coordinates are determined simply by scaling by the voxel size by their
    dimension stored in pixdim. More information here: https://brainder.org/2012/09/23/the-nifti-file-format/

    Args:
        coordinates_vol: coordinate in the volume (raw data)
        header: header object

    Returns:
        Coordinates in the world space
    """
    import numpy as np

    # Bug fix: the voxel sizes must be wrapped in a list. The previous code
    # called np.array(pixdim[1], pixdim[2], pixdim[3]), which passes pixdim[2]
    # as the `dtype` argument of np.array and raises a TypeError at runtime.
    voxel_sizes = np.array(
        [header["pixdim"][1], header["pixdim"][2], header["pixdim"][3]]
    )
    return np.array(coordinates_vol) * voxel_sizes
def vox_to_world_space_method_2(coordinates_vol, header):
    """
    The Method 2 is used when short qform_code is larger than zero. To get the coordinates, we multiply a rotation
    matrix (r_mat) by coordinates_vol, then perform hadamart with pixel dimension pixdim (like in method 1). Then we add
    an offset (qoffset_x, qoffset_y, qoffset_z)

    Args:
        coordinates_vol: coordinate in the volume (raw data)
        header: header object

    Returns:
        Coordinates in the world space

    Raises:
        ValueError: if qform_code is not greater than 0.
    """
    import numpy as np

    def get_r_matrix(h):
        """Get rotation matrix.

        Builds the rotation matrix from the quaternion (b, c, d) stored in the
        header; `a` is recovered from the unit-quaternion constraint.
        More information here: https://brainder.org/2012/09/23/the-nifti-file-format/

        Args:
            h: header

        Returns:
            Rotation matrix
        """
        b = h["quatern_b"]
        c = h["quatern_c"]
        d = h["quatern_d"]
        a = np.sqrt(1 - (b ** 2) - (c ** 2) - (d ** 2))
        r = np.zeros((3, 3))
        r[0, 0] = (a ** 2) + (b ** 2) - (c ** 2) - (d ** 2)
        r[0, 1] = 2 * ((b * c) - (a * d))
        r[0, 2] = 2 * ((b * d) + (a * c))
        r[1, 0] = 2 * ((b * c) + (a * d))
        r[1, 1] = (a ** 2) + (c ** 2) - (b ** 2) - (d ** 2)
        r[1, 2] = 2 * ((c * d) - (a * b))
        r[2, 0] = 2 * ((b * d) - (a * c))
        # Bug fix: this entry was a copy-paste of r[2, 0] (2*(b*d - a*c)).
        # Per the NIfTI quaternion-to-rotation formula, R[2][1] = 2*(c*d + a*b).
        r[2, 1] = 2 * ((c * d) + (a * b))
        r[2, 2] = (a ** 2) + (d ** 2) - (b ** 2) - (c ** 2)
        return r

    i = coordinates_vol[0]
    j = coordinates_vol[1]
    k = coordinates_vol[2]

    if header["qform_code"] > 0:
        r_mat = get_r_matrix(header)
    else:
        # Should never be reached
        raise ValueError("qform_code must be greater than 0 to use this method")

    # qfac (stored in pixdim[0]) must be +1 or -1; it flips the k axis.
    q = header["pixdim"][0]
    if q not in [-1, 1]:
        print(f"q was {q}, now is 1")
        q = 1
    return np.dot(r_mat, np.array([i, j, q * k])) * np.array(
        header["pixdim"][1:4]
    ) + np.array([header["qoffset_x"], header["qoffset_y"], header["qoffset_z"]])
def vox_to_world_space_method_3(coordinates_vol, header):
    """
    This method is used when sform_code is larger than zero. It relies on a full affine matrix, stored in the header in
    the fields srow_[x,y,y], to map voxel to world coordinates.
    When a nifti file is created with raw data and affine=..., this is this method that is used to decypher the
    voxel-to-world correspondance.

    Args:
        coordinates_vol: coordinate in the volume (raw data)
        header: header object

    Returns:
        Coordinates in the world space

    Raises:
        ValueError: if sform_code is not greater than 0.
    """
    import numpy as np

    def get_aff_matrix(h):
        """Get affine transformation matrix.

        Builds the 4x4 affine from the srow_x / srow_y / srow_z header rows.
        See details here: https://brainder.org/2012/09/23/the-nifti-file-format/

        Args:
            h: header

        Returns:
            affine transformation matrix
        """
        mat = np.zeros((4, 4))
        mat[0, 0] = h["srow_x"][0]
        mat[0, 1] = h["srow_x"][1]
        mat[0, 2] = h["srow_x"][2]
        mat[0, 3] = h["srow_x"][3]
        mat[1, 0] = h["srow_y"][0]
        mat[1, 1] = h["srow_y"][1]
        mat[1, 2] = h["srow_y"][2]
        mat[1, 3] = h["srow_y"][3]
        mat[2, 0] = h["srow_z"][0]
        mat[2, 1] = h["srow_z"][1]
        mat[2, 2] = h["srow_z"][2]
        mat[2, 3] = h["srow_z"][3]
        mat[3, 3] = 1
        return mat

    if header["sform_code"] > 0:
        aff = get_aff_matrix(header)
    else:
        # Should never be reached.
        # Bug fix: the previous message stated the opposite of the actual
        # failure condition ("sform_code has a value > 0, so method 3 cannot
        # be used" was raised precisely when sform_code was NOT > 0).
        raise ValueError("sform_code must be greater than 0 to use this method")

    homogeneous_coord = np.concatenate(
        (np.array(coordinates_vol), np.array([1])), axis=0
    )
    # Keep only x, y, z from the homogeneous result.
    return np.dot(aff, homogeneous_coord)[0:3]
def vox_to_world_space_method_3_bis(coordinates_vol, header):
    """
    This method relies on the same technique as method 3, but for images created by FreeSurfer (MGHImage, MGHHeader).

    Args:
        coordinates_vol: coordinate in the volume (raw data)
        header: nib.freesurfer.mghformat.MGHHeader object

    Returns:
        Coordinates in the world space
    """
    import numpy as np

    # get_best_affine() replaces the deprecated get_affine() accessor, which
    # was removed from recent nibabel releases.
    affine_transformation_matrix = header.get_best_affine()
    homogeneous_coord = np.concatenate(
        (np.array(coordinates_vol), np.array([1])), axis=0
    )
    # Keep only x, y, z from the homogeneous result.
    return np.dot(affine_transformation_matrix, homogeneous_coord)[0:3]
| 38.960712 | 139 | 0.566764 |
373f916e3095ce8f4c7e01e304c1b92337b022b7 | 2,076 | py | Python | src/regiones_at.py | zara-ms/python_class | 8a9529e14e84c3c2e3fd0a7c793fcb09471ea55c | [
"MIT"
] | null | null | null | src/regiones_at.py | zara-ms/python_class | 8a9529e14e84c3c2e3fd0a7c793fcb09471ea55c | [
"MIT"
] | null | null | null | src/regiones_at.py | zara-ms/python_class | 8a9529e14e84c3c2e3fd0a7c793fcb09471ea55c | [
"MIT"
] | 3 | 2021-04-09T18:40:26.000Z | 2021-09-07T01:15:03.000Z | """
## NAME
regiones_at.py
## VERSION
[1.0]
## AUTHOR
Zara Paulina Martinez Sanchez < zaram042001 @ gmail.com >
## DATE
[08/06/2021]
## DESCRIPTION
Programa que analiza una secuencia de DNA para buscar regiones ricas en AT
las cuales contengan 5 o mas As y/o Ts. En caso de contener en la secuencia
caracteres diferentes a ATGC se le notifica al usuario
## CATEGORY
Sequence analysis
## USAGE
regiones_at.py no requiere argumentos
## FUNCTIONS
def analizar_sec(seq):
no_bases = re.findall(r"[^ATGC]", seq)
region_at = re.findall(r"[AT]{5,}", seq)
try:
if no_bases:
raise ValueError
except ValueError:
print(f'La secuencia que ingresó cuenta con caracteres no validos: {no_bases}')
else:
if region_at:
print(f'Las regiones ricas en AT son: {region_at}')
else:
print("No existen regiones ricas en AT en su secuencia")
## EXAMPLES
Input:
CTGCATTATATCGTACGAAATTATACGCGCG
Output:
Las regiones ricas en AT son: ['ATTATAT', 'AAATTATA']
## GITHUB LINK
https://github.com/zara-ms/python_class/tree/master/src
"""
# Libreria a usar
import re
def analizar_sec(seq):
no_bases = re.findall(r"[^ATGC]", seq)
region_at = re.findall(r"[AT]{5,}", seq)
# Reconocer caracteres invalidos y marcar error
try:
if no_bases:
raise ValueError
except ValueError:
print(f'La secuencia que ingresó cuenta con caracteres no validos: {no_bases}')
# Buscar secuencias ricas en AT si la secuencia es correcta
else:
if region_at:
print(f'Las regiones ricas en AT son: {region_at}')
else:
print("No existen regiones ricas en AT en su secuencia")
print("Ingrese la secuencia a analizar")
secuencia = input()
secuencia = secuencia.upper()
# Llamar a la funcion
analizar_sec(secuencia)
| 22.322581 | 95 | 0.600674 |
b65565c8c7687e8761616c34843ff28ea97f91c4 | 332 | py | Python | server/password_text.py | cylindricalcow/physics_hackathon | 726df1bce391dc4b64c850b34fefbce71e25b4db | [
"MIT"
] | 2 | 2018-02-24T09:20:34.000Z | 2018-02-25T15:42:53.000Z | server/password_text.py | cylindricalcow/physics_hackathon | 726df1bce391dc4b64c850b34fefbce71e25b4db | [
"MIT"
] | null | null | null | server/password_text.py | cylindricalcow/physics_hackathon | 726df1bce391dc4b64c850b34fefbce71e25b4db | [
"MIT"
] | 4 | 2018-02-24T23:27:51.000Z | 2018-02-27T02:15:28.000Z | import hashlib
from authenticator import *
def hash(username, password, passwordHash):
return hashlib.md5(password).hexdigest()
realm = Realm()
myPortal = portal.Portal(realm)
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser("user", "pass")
checker = checkers.FilePasswordDB("passwords.txt", hash=hash) | 33.2 | 61 | 0.795181 |
6d0a64632e94913c0a9eb1c215922e6fc58dc738 | 1,812 | py | Python | src/states.py | matpalm/drivebot | 2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88 | [
"MIT"
] | 70 | 2016-02-20T02:59:14.000Z | 2021-12-30T04:19:09.000Z | src/states.py | matpalm/drivebot | 2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88 | [
"MIT"
] | 1 | 2016-05-03T15:57:58.000Z | 2016-05-04T13:55:53.000Z | src/states.py | matpalm/drivebot | 2b6c30209f7a50e289fa70b68fdc93f5e2bd7e88 | [
"MIT"
] | 17 | 2016-02-20T03:53:46.000Z | 2021-03-17T07:38:18.000Z | # utils for converting a stream of sensor data into a state suitable
# for a policy to use.
import copy
import itertools
import numpy as np
# given sonar readings just trivially return index of furthest away
# { F, L, R }
class FurthestSonar:
def state_given_new_ranges(self, ranges):
return [np.argmax(ranges)]
def reset(self):
pass
# given sonar readings return a state based on the 6 possible orderings.
# { FLR, FRL, LFR, LRF, RFL, RLf }
class OrderingSonars:
def state_given_new_ranges(self, ranges):
i_v_sorted_by_v = sorted(enumerate(ranges), key=lambda (i, v): -v)
just_i = [i for (i, v) in i_v_sorted_by_v]
return just_i
def reset(self):
pass
# standardises sonar values based on some (precomputed) mean / std
class StandardisedSonars:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def state_given_new_ranges(self, ranges):
return [(v-self.mean)/self.std for v in map(float, ranges)]
def reset(self):
pass
# wrap another sonar reader and keep track of last history_length entries.
class StateHistory:
def __init__(self, sonar_to_state, history_length):
self.history_length = history_length
self.sonar_to_state = sonar_to_state
self.reset()
def reset(self):
self.state_ = []
def state_given_new_ranges(self, ranges):
partial_state = self.sonar_to_state.state_given_new_ranges(ranges)
if len(self.state_) == 0:
# for first example just buffer up history #hack
for _ in range(self.history_length):
self.state_.append(partial_state)
else:
self.state_.append(partial_state)
self.state_.pop(0)
return copy.deepcopy(self.state_)
| 27.876923 | 74 | 0.663355 |
6b58bf971235f9e6e74e9a88dcbbedbe85ea106e | 5,341 | py | Python | test/azure/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_xms_client_request_id_operations.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_xms_client_request_id_operations.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_xms_client_request_id_operations.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class XMsClientRequestIdOperations(object):
"""XMsClientRequestIdOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get(
self, custom_headers=None, raw=False, **operation_config):
"""Get method that overwrites x-ms-client-request header with value
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
get.metadata = {'url': '/azurespecials/overwrite/x-ms-client-request-id/method/'}
def param_get(
self, x_ms_client_request_id, custom_headers=None, raw=False, **operation_config):
"""Get method that overwrites x-ms-client-request header with value
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:param x_ms_client_request_id: This should appear as a method
parameter, use value '9C4D50EE-2D56-4CD3-8152-34347DC9F2B0'
:type x_ms_client_request_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azurespecialproperties.models.ErrorException>`
"""
# Construct URL
url = self.param_get.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
param_get.metadata = {'url': '/azurespecials/overwrite/x-ms-client-request-id/via-param/method/'}
| 41.726563 | 140 | 0.671223 |
80d89767d8533d91581eddd1136a46d609a1c981 | 3,864 | py | Python | hmkit/autoapi/msg_type.py | highmobility/hmkit-python | 2ac06ed021b57014f5290eaece19a9399d52df48 | [
"MIT"
] | 1 | 2021-08-01T20:35:57.000Z | 2021-08-01T20:35:57.000Z | hmkit/autoapi/msg_type.py | highmobility/hmkit-python | 2ac06ed021b57014f5290eaece19a9399d52df48 | [
"MIT"
] | null | null | null | hmkit/autoapi/msg_type.py | highmobility/hmkit-python | 2ac06ed021b57014f5290eaece19a9399d52df48 | [
"MIT"
] | null | null | null | """
The MIT License
Copyright (c) 2014- High-Mobility GmbH (https://high-mobility.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import codecs
from . import *
#from . import commands
#from autoapi.commands import *
#from autoapi.commands.properties import *
from .identifiers import Identifiers
import logging
log = logging.getLogger('hmkit.autoapi')
class MsgType():
    """Pairing of an AutoAPI message identifier with a message type byte.

    Wraps a two-byte capability identifier together with the one-byte
    message type that follows it on the wire.
    """

    def __init__(self, identifier, msgtype):
        """
        :param identifiers.Identifiers identifier: enum member whose
            ``value`` is the ASCII hex encoding (e.g. ``b'0023'``) of the
            two identifier bytes
        :param int msgtype: message type integer value
        :rtype: None
        """
        # identifier.value holds an ASCII hex string; decode it and
        # convert to the raw two identifier bytes.
        identifier_str = identifier.value.decode()
        self.identifier_bytes = bytearray.fromhex(identifier_str)
        self.type = msgtype
        # Build identifier+type from a *copy* of the identifier bytes.
        # The original assigned the bytearray directly, so the append
        # below also mutated identifier_bytes (aliasing bug), making
        # get_identifier_bytes()/get_identifier() return 3 bytes.
        self.identifier_and_type = bytearray(self.identifier_bytes)
        self.identifier_and_type.append(msgtype)

    def get_identifier_and_type(self):
        """
        get the identifier bytes followed by the message type byte

        :rtype: bytearray
        """
        return self.identifier_and_type

    def get_identifier_bytes(self):
        """
        get the identifier bytes

        :rtype: bytearray
        """
        return self.identifier_bytes

    def get_identifier(self):
        """
        get the identifier object

        :rtype: identifiers.Identifiers
        """
        return Identifiers(self.identifier_bytes)

    def get_type(self):
        """
        get the message type byte

        :rtype: int
        """
        return self.type
| 37.882353 | 176 | 0.680383 |
1a2ec4fda02beef4f3b97ff5f4100c3805bf6108 | 19,540 | py | Python | nsq/async.py | ericb/pynsq | cbbea95872c6e4a5c8da8d7ff64ce4857d94bfb4 | [
"MIT"
] | null | null | null | nsq/async.py | ericb/pynsq | cbbea95872c6e4a5c8da8d7ff64ce4857d94bfb4 | [
"MIT"
] | null | null | null | nsq/async.py | ericb/pynsq | cbbea95872c6e4a5c8da8d7ff64ce4857d94bfb4 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import time
import socket
import logging
from ._compat import bytes_types, string_types
from ._compat import struct_l
from .version import __version__
try:
import ssl
except ImportError:
ssl = None # pyflakes.ignore
try:
from .snappy_socket import SnappySocket
except ImportError:
SnappySocket = None # pyflakes.ignore
try:
import simplejson as json
except ImportError:
import json # pyflakes.ignore
import tornado.iostream
import tornado.ioloop
try:
from tornado.simple_httpclient import _default_ca_certs as default_ca_certs
except ImportError:
# Tornado < 4
from tornado.simple_httpclient import _DEFAULT_CA_CERTS
def default_ca_certs():
return _DEFAULT_CA_CERTS
from nsq import event, protocol
from .deflate_socket import DeflateSocket
logger = logging.getLogger(__name__)
# states
INIT = 'INIT'
DISCONNECTED = 'DISCONNECTED'
CONNECTING = 'CONNECTING'
CONNECTED = 'CONNECTED'
DEFAULT_USER_AGENT = 'pynsq/%s' % __version__
class AsyncConn(event.EventedMixin):
    """
    Low level object representing a TCP connection to nsqd.
    When a message on this connection is requeued and the requeue delay
    has not been specified, it calculates the delay automatically by an
    increasing multiple of ``requeue_delay``.
    Generates the following events that can be listened to with
    :meth:`nsq.AsyncConn.on`:
    * ``connect``
    * ``close``
    * ``error``
    * ``identify``
    * ``identify_response``
    * ``auth``
    * ``auth_response``
    * ``heartbeat``
    * ``ready``
    * ``message``
    * ``response``
    * ``backoff``
    * ``resume``
    :param host: the host to connect to
    :param port: the post to connect to
    :param timeout: the timeout for read/write operations (in seconds)
    :param heartbeat_interval: the amount of time (in seconds) to negotiate
        with the connected producers to send heartbeats (requires nsqd 0.2.19+)
    :param requeue_delay: the base multiple used when calculating requeue delay
        (multiplied by # of attempts)
    :param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+)
    :param tls_options: dictionary of options to pass to `ssl.wrap_socket()
        <http://docs.python.org/2/library/ssl.html#ssl.wrap_socket>`_ as
        ``**kwargs``
    :param snappy: enable Snappy stream compression (requires nsqd 0.2.23+)
    :param deflate: enable deflate stream compression (requires nsqd 0.2.23+)
    :param deflate_level: configure the deflate compression level for this
        connection (requires nsqd 0.2.23+)
    :param output_buffer_size: size of the buffer (in bytes) used by nsqd
        for buffering writes to this connection
    :param output_buffer_timeout: timeout (in ms) used by nsqd before
        flushing buffered writes (set to 0 to disable). **Warning**:
        configuring clients with an extremely low (``< 25ms``)
        ``output_buffer_timeout`` has a significant effect on ``nsqd``
        CPU usage (particularly with ``> 50`` clients connected).
    :param sample_rate: take only a sample of the messages being sent
        to the client. Not setting this or setting it to 0 will ensure
        you get all the messages destined for the client.
        Sample rate can be greater than 0 or less than 100 and the client
        will receive that percentage of the message traffic.
        (requires nsqd 0.2.25+)
    :param user_agent: a string identifying the agent for this client
        in the spirit of HTTP (default: ``<client_library_name>/<version>``)
        (requires nsqd 0.2.25+)
    :param auth_secret: a byte string passed when using nsq auth
        (requires nsqd 1.0+)
    :param msg_timeout: the amount of time (in seconds) that nsqd will wait
        before considering messages that have been delivered to this
        consumer timed out (requires nsqd 0.2.28+)
    :param hostname: a string identifying the host where this client runs
        (default: ``<hostname>``)
    """
    def __init__(
            self,
            host,
            port,
            timeout=1.0,
            heartbeat_interval=30,
            requeue_delay=90,
            tls_v1=False,
            tls_options=None,
            snappy=False,
            deflate=False,
            deflate_level=6,
            user_agent=DEFAULT_USER_AGENT,
            output_buffer_size=16 * 1024,
            output_buffer_timeout=250,
            sample_rate=0,
            io_loop=None,
            auth_secret=None,
            msg_timeout=None,
            hostname=None):
        # Validate constructor arguments up front so misconfiguration
        # fails fast instead of surfacing mid-connection.
        assert isinstance(host, string_types)
        assert isinstance(port, int)
        assert isinstance(timeout, float)
        assert isinstance(tls_options, (dict, None.__class__))
        assert isinstance(deflate_level, int)
        assert isinstance(heartbeat_interval, int) and heartbeat_interval >= 1
        assert isinstance(requeue_delay, int) and requeue_delay >= 0
        assert isinstance(output_buffer_size, int) and output_buffer_size >= 0
        assert isinstance(output_buffer_timeout, int) and output_buffer_timeout >= 0
        assert isinstance(sample_rate, int) and sample_rate >= 0 and sample_rate < 100
        assert isinstance(auth_secret, bytes_types + (None.__class__,))
        assert tls_v1 and ssl or not tls_v1, \
            'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'
        assert msg_timeout is None or (isinstance(msg_timeout, (float, int)) and msg_timeout > 0)
        self.state = INIT
        self.host = host
        self.port = port
        self.timeout = timeout
        self.last_recv_timestamp = time.time()
        self.last_msg_timestamp = time.time()
        self.in_flight = 0
        self.rdy = 0
        self.rdy_timeout = None
        # for backwards compatibility when interacting with older nsqd
        # (pre 0.2.20), default this to their hard-coded max
        self.max_rdy_count = 2500
        self.tls_v1 = tls_v1
        self.tls_options = tls_options
        self.snappy = snappy
        self.deflate = deflate
        self.deflate_level = deflate_level
        self.hostname = hostname
        if self.hostname is None:
            self.hostname = socket.gethostname()
        self.short_hostname = self.hostname.split('.')[0]
        # heartbeat and message timeouts are negotiated with nsqd in
        # milliseconds; the constructor takes seconds.
        self.heartbeat_interval = heartbeat_interval * 1000
        self.msg_timeout = int(msg_timeout * 1000) if msg_timeout else None
        self.requeue_delay = requeue_delay
        self.io_loop = io_loop
        if not self.io_loop:
            self.io_loop = tornado.ioloop.IOLoop.instance()
        self.output_buffer_size = output_buffer_size
        self.output_buffer_timeout = output_buffer_timeout
        self.sample_rate = sample_rate
        self.user_agent = user_agent
        self._authentication_required = False  # tracking server auth state
        self.auth_secret = auth_secret
        self.socket = None
        self.stream = None
        # features (tls/snappy/deflate) queued for enabling after the
        # IDENTIFY response, consumed one at a time in _on_response_continue
        self._features_to_enable = []
        self.last_rdy = 0
        self.rdy = 0
        self.callback_queue = []
        super(AsyncConn, self).__init__()
    @property
    def id(self):
        # stable identifier for this connection ("host:port")
        return str(self)
    def __str__(self):
        return self.host + ':' + str(self.port)
    def connected(self):
        # True once the TCP connection is established
        return self.state == CONNECTED
    def connecting(self):
        # True while the TCP connect is still in progress
        return self.state == CONNECTING
    def closed(self):
        # True if the connection was never opened or has been closed
        return self.state in (INIT, DISCONNECTED)
    def connect(self):
        # open a non-blocking TCP connection to nsqd; no-op unless closed
        if not self.closed():
            return
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.settimeout(self.timeout)
        self.socket.setblocking(0)
        self.stream = tornado.iostream.IOStream(self.socket, io_loop=self.io_loop)
        self.stream.set_close_callback(self._socket_close)
        self.stream.set_nodelay(True)
        self.state = CONNECTING
        self.on(event.CONNECT, self._on_connect)
        self.on(event.DATA, self._on_data)
        self.stream.connect((self.host, self.port), self._connect_callback)
    def _connect_callback(self):
        # TCP connect succeeded: send the protocol magic and start the
        # read loop before announcing the CONNECT event
        self.state = CONNECTED
        self.stream.write(protocol.MAGIC_V2)
        self._start_read()
        self.trigger(event.CONNECT, conn=self)
    def _read_bytes(self, size, callback):
        # read exactly `size` bytes, surfacing a closed stream as an
        # ERROR event instead of an uncaught IOError
        try:
            self.stream.read_bytes(size, callback)
        except IOError:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.ConnectionClosedError('Stream is closed'),
            )
    def _start_read(self):
        # nsqd frames are length-prefixed: read the 4-byte size first
        self._read_bytes(4, self._read_size)
    def _socket_close(self):
        self.state = DISCONNECTED
        self.trigger(event.CLOSE, conn=self)
    def close(self):
        self.stream.close()
    def _read_size(self, data):
        # parse the 4-byte frame-size prefix, then read the frame body
        try:
            size = struct_l.unpack(data)[0]
        except Exception:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.IntegrityError('failed to unpack size'),
            )
            return
        self._read_bytes(size, self._read_body)
    def _read_body(self, data):
        # dispatch the frame payload, then immediately queue the next read
        try:
            self.trigger(event.DATA, conn=self, data=data)
        except Exception:
            logger.exception('uncaught exception in data event')
        self._start_read()
    def send(self, data):
        self.stream.write(data)
    def upgrade_to_tls(self, options=None):
        assert ssl, 'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'
        # in order to upgrade to TLS we need to *replace* the IOStream...
        #
        # first remove the event handler for the currently open socket
        # so that when we add the socket to the new SSLIOStream below,
        # it can re-add the appropriate event handlers.
        self.io_loop.remove_handler(self.socket.fileno())
        opts = {
            'cert_reqs': ssl.CERT_REQUIRED,
            'ca_certs': default_ca_certs()
        }
        opts.update(options or {})
        self.socket = ssl.wrap_socket(self.socket, ssl_version=ssl.PROTOCOL_TLSv1,
                                      do_handshake_on_connect=False, **opts)
        self.stream = tornado.iostream.SSLIOStream(self.socket, io_loop=self.io_loop)
        self.stream.set_close_callback(self._socket_close)
        # now that the IOStream has been swapped we can kickstart
        # the SSL handshake
        self.stream._do_ssl_handshake()
    def upgrade_to_snappy(self):
        assert SnappySocket, 'snappy requires the python-snappy package'
        # in order to upgrade to Snappy we need to use whatever IOStream
        # is currently in place (normal or SSL)...
        #
        # first read any compressed bytes the existing IOStream might have
        # already buffered and use that to bootstrap the SnappySocket, then
        # monkey patch the existing IOStream by replacing its socket
        # with a wrapper that will automagically handle compression.
        existing_data = self.stream._consume(self.stream._read_buffer_size)
        self.socket = SnappySocket(self.socket)
        self.socket.bootstrap(existing_data)
        self.stream.socket = self.socket
    def upgrade_to_deflate(self):
        # in order to upgrade to DEFLATE we need to use whatever IOStream
        # is currently in place (normal or SSL)...
        #
        # first read any compressed bytes the existing IOStream might have
        # already buffered and use that to bootstrap the DefalteSocket, then
        # monkey patch the existing IOStream by replacing its socket
        # with a wrapper that will automagically handle compression.
        existing_data = self.stream._consume(self.stream._read_buffer_size)
        self.socket = DeflateSocket(self.socket, self.deflate_level)
        self.socket.bootstrap(existing_data)
        self.stream.socket = self.socket
    def send_rdy(self, value):
        # send a RDY (ready) count to nsqd; returns False on send failure
        try:
            self.send(protocol.ready(value))
        except Exception as e:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.SendError('failed to send RDY %d' % value, e),
            )
            return False
        self.last_rdy = value
        self.rdy = value
        return True
    def _on_connect(self, **kwargs):
        # negotiate client features with nsqd via the IDENTIFY command
        identify_data = {
            'short_id': self.short_hostname,  # TODO remove when deprecating pre 1.0 support
            'long_id': self.hostname,  # TODO remove when deprecating pre 1.0 support
            'client_id': self.short_hostname,
            'hostname': self.hostname,
            'heartbeat_interval': self.heartbeat_interval,
            'feature_negotiation': True,
            'tls_v1': self.tls_v1,
            'snappy': self.snappy,
            'deflate': self.deflate,
            'deflate_level': self.deflate_level,
            'output_buffer_timeout': self.output_buffer_timeout,
            'output_buffer_size': self.output_buffer_size,
            'sample_rate': self.sample_rate,
            'user_agent': self.user_agent
        }
        if self.msg_timeout:
            identify_data['msg_timeout'] = self.msg_timeout
        self.trigger(event.IDENTIFY, conn=self, data=identify_data)
        self.on(event.RESPONSE, self._on_identify_response)
        try:
            self.send(protocol.identify(identify_data))
        except Exception as e:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.SendError('failed to bootstrap connection', e),
            )
    def _on_identify_response(self, data, **kwargs):
        # handle the (possibly feature-negotiated JSON) IDENTIFY response
        self.off(event.RESPONSE, self._on_identify_response)
        if data == b'OK':
            # plain OK means the server did not negotiate features
            logger.warning('nsqd version does not support feature netgotiation')
            return self.trigger(event.READY, conn=self)
        try:
            data = json.loads(data.decode('utf-8'))
        except ValueError:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.IntegrityError(
                    'failed to parse IDENTIFY response JSON from nsqd - %r' %
                    data
                ),
            )
            return
        self.trigger(event.IDENTIFY_RESPONSE, conn=self, data=data)
        # queue only the features both sides agreed on
        if self.tls_v1 and data.get('tls_v1'):
            self._features_to_enable.append('tls_v1')
        if self.snappy and data.get('snappy'):
            self._features_to_enable.append('snappy')
        if self.deflate and data.get('deflate'):
            self._features_to_enable.append('deflate')
        if data.get('auth_required'):
            self._authentication_required = True
        if data.get('max_rdy_count'):
            self.max_rdy_count = data.get('max_rdy_count')
        else:
            # for backwards compatibility when interacting with older nsqd
            # (pre 0.2.20), default this to their hard-coded max
            logger.warn('setting max_rdy_count to default value of 2500')
            self.max_rdy_count = 2500
        self.on(event.RESPONSE, self._on_response_continue)
        self._on_response_continue(conn=self, data=None)
    def _on_response_continue(self, data, **kwargs):
        # enable one negotiated feature per server OK, then AUTH if
        # required, and finally declare the connection READY
        if self._features_to_enable:
            feature = self._features_to_enable.pop(0)
            if feature == 'tls_v1':
                self.upgrade_to_tls(self.tls_options)
            elif feature == 'snappy':
                self.upgrade_to_snappy()
            elif feature == 'deflate':
                self.upgrade_to_deflate()
            # the server will 'OK' after these conneciton upgrades triggering another response
            return
        self.off(event.RESPONSE, self._on_response_continue)
        if self.auth_secret and self._authentication_required:
            self.on(event.RESPONSE, self._on_auth_response)
            self.trigger(event.AUTH, conn=self, data=self.auth_secret)
            try:
                self.send(protocol.auth(self.auth_secret))
            except Exception as e:
                self.close()
                self.trigger(
                    event.ERROR,
                    conn=self,
                    error=protocol.SendError('Error sending AUTH', e),
                )
            return
        self.trigger(event.READY, conn=self)
    def _on_auth_response(self, data, **kwargs):
        # parse the JSON AUTH response and, if valid, declare READY
        try:
            data = json.loads(data.decode('utf-8'))
        except ValueError:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.IntegrityError(
                    'failed to parse AUTH response JSON from nsqd - %r' % data
                ),
            )
            return
        self.off(event.RESPONSE, self._on_auth_response)
        self.trigger(event.AUTH_RESPONSE, conn=self, data=data)
        return self.trigger(event.READY, conn=self)
    def _on_data(self, data, **kwargs):
        # demultiplex a frame into MESSAGE / HEARTBEAT / RESPONSE / ERROR
        self.last_recv_timestamp = time.time()
        frame, data = protocol.unpack_response(data)
        if frame == protocol.FRAME_TYPE_MESSAGE:
            self.last_msg_timestamp = time.time()
            self.in_flight += 1
            message = protocol.decode_message(data)
            message.on(event.FINISH, self._on_message_finish)
            message.on(event.REQUEUE, self._on_message_requeue)
            message.on(event.TOUCH, self._on_message_touch)
            self.trigger(event.MESSAGE, conn=self, message=message)
        elif frame == protocol.FRAME_TYPE_RESPONSE and data == b'_heartbeat_':
            # heartbeats must be answered with NOP or nsqd will close us
            self.send(protocol.nop())
            self.trigger(event.HEARTBEAT, conn=self)
        elif frame == protocol.FRAME_TYPE_RESPONSE:
            self.trigger(event.RESPONSE, conn=self, data=data)
        elif frame == protocol.FRAME_TYPE_ERROR:
            self.trigger(event.ERROR, conn=self, error=protocol.Error(data))
    def _on_message_requeue(self, message, backoff=True, time_ms=-1, **kwargs):
        # requeue the message; a negative time_ms means "derive the delay
        # from requeue_delay * attempts"
        if backoff:
            self.trigger(event.BACKOFF, conn=self)
        else:
            self.trigger(event.CONTINUE, conn=self)
        self.in_flight -= 1
        try:
            time_ms = self.requeue_delay * message.attempts * 1000 if time_ms < 0 else time_ms
            self.send(protocol.requeue(message.id, time_ms))
        except Exception as e:
            self.close()
            self.trigger(event.ERROR, conn=self, error=protocol.SendError(
                'failed to send REQ %s @ %d' % (message.id, time_ms), e))
    def _on_message_finish(self, message, **kwargs):
        # acknowledge successful processing (FIN) and free an in-flight slot
        self.trigger(event.RESUME, conn=self)
        self.in_flight -= 1
        try:
            self.send(protocol.finish(message.id))
        except Exception as e:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.SendError('failed to send FIN %s' % message.id, e),
            )
    def _on_message_touch(self, message, **kwargs):
        # reset the server-side message timeout for an in-flight message
        try:
            self.send(protocol.touch(message.id))
        except Exception as e:
            self.close()
            self.trigger(
                event.ERROR,
                conn=self,
                error=protocol.SendError('failed to send TOUCH %s' % message.id, e),
            )
| 35.527273 | 97 | 0.620829 |
388c62e3f98666b8e55e8c9d80ebd20a4d1de8b1 | 4,219 | py | Python | pyingest/parsers/default.py | golnazads/adsabs-pyingest | 37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e | [
"MIT"
] | null | null | null | pyingest/parsers/default.py | golnazads/adsabs-pyingest | 37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e | [
"MIT"
] | null | null | null | pyingest/parsers/default.py | golnazads/adsabs-pyingest | 37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e | [
"MIT"
] | null | null | null | from past.builtins import basestring
import re
import bs4
import xmltodict as xmltodict_parser
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
import urllib2 as url_lib
except ImportError:
import urllib.request as url_lib
import warnings
import ssl
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
# The following line is to avoid (when doing JOSS harvesting):
# IOError: [Errno socket error] [SSL: CERTIFICATE_VERIFY_FAILED] certificate
# verify failed (_ssl.c:727)
ssl._create_default_https_context = ssl._create_unverified_context
class MissingParser(Exception):
    """Raised when no concrete parser implementation is defined."""
    pass
class UnknownHarvestMethod(Exception):
    """Raised when an unrecognized harvesting method is requested."""
    pass
class DefaultParser(object):
    """just a stub entry: instantiating it always raises MissingParser"""
    def __init__(self):
        raise MissingParser("No parser defined")
class BaseXmlToDictParser(object):
    """
    An XML parser which uses xmltodict to create a dictionary
    out of the input XML stream
    """

    def xmltodict(self, fp, **kwargs):
        """returns a dict as created by xmltodict from the XML input *fp*"""
        return xmltodict_parser.parse(fp, **kwargs)

    def _array(self, e):
        """Ensures that e is an array (list).

        None -> [], a list is returned unchanged, anything else is
        wrapped in a one-element list.
        """
        if isinstance(e, type(None)):
            return []
        elif isinstance(e, list):
            return e
        else:
            return [e]

    def _dict(self, e, d=None):
        """Ensures that e is a dictionary (falling back to d, default {}).

        A fresh empty dict is created per call when no default is given;
        the original used a shared mutable default argument (``d={}``)
        that was returned directly, so caller mutation corrupted every
        subsequent default call.
        """
        if d is None:
            d = {}
        if isinstance(e, type(None)):
            return d
        elif isinstance(e, dict):
            return e
        else:
            return d

    def _text(self, e, d=''):
        """Returns text node of element e (or default d)"""
        if isinstance(e, type(None)):
            return d
        elif isinstance(e, dict):
            return e.get('#text', d)
        elif isinstance(e, basestring):
            return e

    def _attr(self, e, k, d=''):
        """Returns attribute k from element e (or default d)"""
        if isinstance(e, type(None)):
            return d
        elif isinstance(e, dict):
            # xmltodict prefixes attribute keys with '@'
            return e.get('@' + k, d)
        else:
            # strings and any other non-dict value carry no attributes
            return d
class BaseBeautifulSoupParser(object):
    """
    An XML parser which uses BeautifulSoup to create a dictionary
    out of the input XML stream. Used by jats.py and aps.py
    """
    def bsfiletodict(self, fp, **kwargs):
        """returns a BeautifulSoup tree parsed from the open file *fp*"""
        return bs4.BeautifulSoup(fp.read(), 'html5lib', **kwargs)
    def bsstrtodict(self, r, **kwargs):
        """returns a BeautifulSoup tree parsed from the string *r*"""
        return bs4.BeautifulSoup(r, 'html5lib', **kwargs)
class BaseRSSFeedParser(object):
    """
    A parser that takes an RSS/Atom feed and extracts its entry records.
    """

    # all characters in the C0/C1 control ranges, stripped by
    # remove_control_chars below
    control_chars = ''.join(map(chr, list(range(0, 32)) + list(range(127, 160))))
    control_char_re = re.compile('[%s]' % re.escape(control_chars))

    def __init__(self):
        self.errors = []
        self.links = []

    @staticmethod
    def remove_control_chars(s):
        """Return *s* with all C0/C1 control characters removed.

        The original definition took the string as an implicit ``self``
        and referenced ``control_char_re`` as an undefined global, so it
        could never be called successfully; it is now a staticmethod
        with a qualified reference to the class attribute.
        """
        return BaseRSSFeedParser.control_char_re.sub('', s)

    def get_records(self, rssURL, data_tag='entry', headers=None, **kwargs):
        """Fetch *rssURL* and return the list of ``data_tag`` elements.

        Extra keyword arguments are URL-encoded into the query string;
        *headers*, if given, are sent with the HTTP request.  As a side
        effect ``self.links`` is populated with all ``<link>`` elements.
        """
        qparams = urlencode(kwargs)
        if qparams:
            url = "%s?%s" % (rssURL, qparams)
        else:
            url = rssURL
        if headers:
            req = url_lib.Request(url, headers=headers)
        else:
            req = url_lib.Request(url)
        source = url_lib.urlopen(req)
        soup = bs4.BeautifulSoup(source, 'lxml')
        # NOTE: html5lib can't deal with bad encodings like lxml,
        # which is why 'lxml' is used here.
        entries = soup.find_all(data_tag)
        try:
            self.links = soup.find_all('link')
        except Exception:
            self.links = []
        return entries

    def parse(self, url, **kwargs):
        """Parse the feed at *url* and return a list of record dicts."""
        rss_recs = [{}]
        data = self.get_records(url, **kwargs)
        for d in data:
            try:
                # bug fix: the original read data.find('title') (the whole
                # result set) instead of the current entry `d`
                title = d.find('title').text
            except Exception:
                title = ''
            rss_recs.append({
                'title': title,
            })
        return rss_recs
| 27.575163 | 81 | 0.58758 |
c4124c2b2b4023fcf3a84dceee260dd9e6ddb406 | 4,020 | py | Python | incasem/utils/scale_pyramid.py | kirchhausenlab/incasem | ee9e007c5c04571e547e2fb5af5e800bd2d2b435 | [
"BSD-3-Clause"
] | null | null | null | incasem/utils/scale_pyramid.py | kirchhausenlab/incasem | ee9e007c5c04571e547e2fb5af5e800bd2d2b435 | [
"BSD-3-Clause"
] | null | null | null | incasem/utils/scale_pyramid.py | kirchhausenlab/incasem | ee9e007c5c04571e547e2fb5af5e800bd2d2b435 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
import daisy
import numpy as np
import skimage.measure
import zarr
# monkey-patch os.mkdirs, due to bug in zarr
prev_makedirs = os.makedirs
def makedirs(name, mode=0o777, exist_ok=False):
    """Drop-in replacement for os.makedirs that never fails on an already
    existing directory; the caller's exist_ok argument is deliberately
    ignored (zarr passes exist_ok=False and would crash otherwise)."""
    # always ok if exists
    return prev_makedirs(name, mode, exist_ok=True)
os.makedirs = makedirs
def downscale_block(in_array, out_array, factor, block):
    """Downsample one daisy block of ``in_array`` into ``out_array``.

    uint32/uint64 data (labels) is subsampled by picking every
    ``factor``-th voxel; any other dtype is averaged with
    ``skimage.measure.block_reduce``.  Returns 0 on success.
    """
    dims = len(factor)
    in_data = in_array.to_ndarray(block.read_roi, fill_value=0)
    in_shape = daisy.Coordinate(in_data.shape[-dims:])
    assert in_shape.is_multiple_of(factor)
    n_channels = len(in_data.shape) - dims
    if n_channels >= 1:
        # leave leading channel dimensions untouched when downscaling
        factor = (1,) * n_channels + factor
    if in_data.dtype == np.uint64 or in_data.dtype == np.uint32:
        # BG: This is for the labels
        slices = tuple(slice(k // 2, None, k) for k in factor)
        out_data = in_data[slices]
    else:
        out_data = skimage.measure.block_reduce(in_data, factor, np.mean)
    try:
        out_array[block.write_roi] = out_data
    except Exception:
        print("Failed to write to %s" % block.write_roi)
        raise
    return 0
def downscale(in_array, out_array, factor, write_size):
    """Downsample ``in_array`` into ``out_array`` block-wise with daisy.

    Splits ``out_array``'s ROI into blocks of ``write_size`` and runs
    ``downscale_block`` over them with 32 parallel workers.
    """
    print("Downsampling by factor %s" % (factor,))
    dims = in_array.roi.dims()
    block_roi = daisy.Roi((0,) * dims, write_size)
    print("Processing ROI %s with blocks %s" % (out_array.roi, block_roi))
    daisy.run_blockwise(
        out_array.roi,
        block_roi,
        block_roi,
        process_function=lambda b: downscale_block(
            in_array,
            out_array,
            factor,
            b),
        read_write_conflict=False,
        num_workers=32,
        max_retries=0,
        fit='shrink')
def scale_pyramid(in_file, in_ds_name, scales, chunk_shape):
    """Create a multiscale pyramid for a zarr dataset.

    Moves the input dataset to ``<name>/s0`` (unless it is already
    scale-named) and writes one additional dataset ``<name>/s<i+1>`` per
    entry in ``scales``, each downsampled from the previous level.
    """
    ds = zarr.open(in_file)
    # make sure in_ds_name points to a dataset
    try:
        daisy.open_ds(in_file, in_ds_name)
    except Exception:
        raise RuntimeError("%s does not seem to be a dataset" % in_ds_name)
    if not in_ds_name.endswith('/s0'):
        ds_name = in_ds_name + '/s0'
        print("Moving %s to %s" % (in_ds_name, ds_name))
        # two-step rename via a __tmp name to relocate the dataset under
        # its new scale-level path
        ds.store.rename(in_ds_name, in_ds_name + '__tmp')
        ds.store.rename(in_ds_name + '__tmp', ds_name)
    else:
        ds_name = in_ds_name
        in_ds_name = in_ds_name[:-3]
    print("Scaling %s by a factor of %s" % (in_file, scales))
    prev_array = daisy.open_ds(in_file, ds_name)
    if chunk_shape is not None:
        chunk_shape = daisy.Coordinate(chunk_shape)
    else:
        chunk_shape = daisy.Coordinate(prev_array.data.chunks)
        print("Reusing chunk shape of %s for new datasets" % (chunk_shape,))
    if prev_array.n_channel_dims == 0:
        num_channels = 1
    elif prev_array.n_channel_dims == 1:
        num_channels = prev_array.shape[0]
    else:
        raise RuntimeError(
            "more than one channel not yet implemented, sorry...")
    for scale_num, scale in enumerate(scales):
        try:
            scale = daisy.Coordinate(scale)
        except Exception:
            # a scalar scale applies isotropically to every dimension
            scale = daisy.Coordinate((scale,) * chunk_shape.dims())
        next_voxel_size = prev_array.voxel_size * scale
        next_total_roi = prev_array.roi.snap_to_grid(
            next_voxel_size,
            mode='grow')
        next_write_size = chunk_shape * next_voxel_size
        print("Next voxel size: %s" % (next_voxel_size,))
        print("Next total ROI: %s" % next_total_roi)
        print("Next chunk size: %s" % (next_write_size,))
        next_ds_name = in_ds_name + '/s' + str(scale_num + 1)
        print("Preparing %s" % (next_ds_name,))
        next_array = daisy.prepare_ds(
            in_file,
            next_ds_name,
            total_roi=next_total_roi,
            voxel_size=next_voxel_size,
            write_size=next_write_size,
            dtype=prev_array.dtype,
            num_channels=num_channels)
        downscale(prev_array, next_array, scale, next_write_size)
        # the freshly written level becomes the source for the next one
        prev_array = next_array
| 27.724138 | 76 | 0.632836 |
37ede7338246715cd0bc34981d8e9081036d28e0 | 1,043 | py | Python | src/bulk_mover/populate_paths.py | StateArchivesOfNorthCarolina/sanc-repo-manager | 84e1782fd119244c58aa1401fd86a70a395ab3b7 | [
"MIT"
] | null | null | null | src/bulk_mover/populate_paths.py | StateArchivesOfNorthCarolina/sanc-repo-manager | 84e1782fd119244c58aa1401fd86a70a395ab3b7 | [
"MIT"
] | 4 | 2018-09-12T13:32:43.000Z | 2022-03-11T23:27:49.000Z | src/bulk_mover/populate_paths.py | StateArchivesOfNorthCarolina/sanc-repo-manager | 84e1782fd119244c58aa1401fd86a70a395ab3b7 | [
"MIT"
] | null | null | null | import os
import sys
from mover_classes.PathUnique import PathUnique
def file_chooser():
    """Interactively pick a file from the completed-inventory directory.

    Lists the directory contents as a 1-based numbered menu, prompts on
    stdin, and returns the full path of the chosen file.
    """
    base_path = "L:\\Intranet\\ar\\Digital_Services\\Inventory\\004_COMPLETED"
    files = os.listdir(base_path)
    # enumerate(..., 1) replaces the original `for i in range(len(files)):
    # i += 1` pattern; the menu numbering is identical.
    for num, name in enumerate(files, start=1):
        print("{})\t{}".format(num, name))
    sel = input("Which file do you want to process: ")
    return os.path.join(base_path, files[int(sel) - 1])
def add_to_paths():
    """Load the user-chosen tab-separated inventory file and register each
    (id, path) row with PathUnique, skipping already-recorded paths."""
    pu = PathUnique()
    with open(file_chooser(), 'r') as fh:
        for line in fh.readlines():
            # each line is "<id>\t<path>"
            s = line.strip().split("\t")
            if not pu.is_unique(s[1]):
                # path already known: report the existing entry and skip
                print(pu.current_path[0], pu.current_path[1])
                continue
            pu.add_to_paths(s[0], s[1])
    pu.save()
def examine_paths():
    """Print every recorded (key, first path) pair held by PathUnique."""
    pu = PathUnique()
    for k, v in pu.print_paths():
        print("{}\t{}".format(k, v[0]))
if __name__ == '__main__':
    # Guard against a missing mode argument (the original indexed
    # sys.argv[1] unconditionally and crashed with an IndexError) and
    # report unknown modes instead of exiting silently.
    if len(sys.argv) < 2:
        print("usage: {} [a|p]  (a = add to paths, p = print paths)".format(sys.argv[0]))
        sys.exit(1)
    mode = sys.argv[1]
    if mode == "a":
        add_to_paths()
        sys.exit(0)
    elif mode == "p":
        examine_paths()
        sys.exit(0)
    else:
        print("unknown mode: {}".format(mode))
        sys.exit(1)
ed24b0cbe02782bcb57bdea15984509d58994935 | 1,067 | py | Python | pytest_asyncio_network_simulator/server.py | vaporyproject/pytest-asyncio-network-simulator | 7a7ee136c8e47cde751c1a5af9739b1099810608 | [
"MIT"
] | 4 | 2019-06-05T23:53:04.000Z | 2021-11-04T14:24:21.000Z | pytest_asyncio_network_simulator/server.py | vaporyproject/pytest-asyncio-network-simulator | 7a7ee136c8e47cde751c1a5af9739b1099810608 | [
"MIT"
] | 5 | 2018-07-20T20:34:04.000Z | 2019-04-26T23:02:40.000Z | pytest_asyncio_network_simulator/server.py | vaporyproject/pytest-asyncio-network-simulator | 7a7ee136c8e47cde751c1a5af9739b1099810608 | [
"MIT"
] | 4 | 2018-08-23T07:43:12.000Z | 2020-10-01T03:00:27.000Z | import asyncio
from typing import (
List,
)
from .address import (
Address,
)
from .utils import (
ConnectionCallback,
)
class Server(asyncio.AbstractServer):
    """
    Mock version of `asyncio.Server` object.

    Tracks the StreamWriter of every simulated client connection so the
    whole server can be shut down (`close`) and drained (`wait_closed`)
    as a unit.
    """
    connections: List[asyncio.StreamWriter]

    def __init__(
            self,
            client_connected_cb: ConnectionCallback,
            address: Address) -> None:
        self.client_connected_cb = client_connected_cb
        self.address = address
        self.connections = []

    def __repr__(self) -> str:
        return '<%s %s>' % (self.__class__.__name__, self.address)

    def close(self) -> None:
        # Signal EOF to every connected client; the actual draining
        # happens in wait_closed().
        for writer in self.connections:
            writer.write_eof()

    async def wait_closed(self) -> None:  # type: ignore
        # asyncio.wait() raises ValueError when handed an empty set of
        # awaitables, so a server that never accepted a connection must
        # return immediately instead of crashing.
        if not self.connections:
            return
        await asyncio.wait(
            tuple(writer.drain() for writer in self.connections),
            timeout=0.01,
            return_when=asyncio.ALL_COMPLETED
        )

    def add_connection(self, writer: asyncio.StreamWriter) -> None:
        self.connections.append(writer)
2d246475e2629186750b17c8f1b7e88b471e7e7f | 8,313 | py | Python | statsmodels/robust/scale.py | yarikoptic/statsmodels | f990cb1a1ef0c9883c9394444e6f9d027efabec6 | [
"BSD-3-Clause"
] | 1 | 2021-11-04T04:57:15.000Z | 2021-11-04T04:57:15.000Z | statsmodels/robust/scale.py | yarikoptic/statsmodels | f990cb1a1ef0c9883c9394444e6f9d027efabec6 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/robust/scale.py | yarikoptic/statsmodels | f990cb1a1ef0c9883c9394444e6f9d027efabec6 | [
"BSD-3-Clause"
] | 2 | 2017-05-31T21:21:33.000Z | 2021-09-12T12:19:03.000Z | """
Support and standalone functions for Robust Linear Models
References
----------
PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.
R Venables, B Ripley. 'Modern Applied Statistics in S'
Springer, New York, 2002.
"""
from statsmodels.compat.python import callable, range
import numpy as np
from scipy.stats import norm as Gaussian
from . import norms
from statsmodels.tools import tools
def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median):
    # c \approx .6745
    """
    The Median Absolute Deviation along given axis of an array

    Parameters
    ----------
    a : array-like
        Input array.
    c : float, optional
        The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),
        which is approximately .6745.
    axis : int, optional
        The default is 0. Can also be None.
    center : callable or float
        If a callable is provided, such as the default `np.median` then it
        is expected to be called center(a). The axis argument will be applied
        via np.apply_over_axes. Otherwise, provide a float.

    Returns
    -------
    mad : float
        `mad` = median(abs(`a` - center))/`c`
    """
    data = np.asarray(a)
    # A callable center (e.g. np.median) is evaluated over the requested
    # axis; a plain numeric center is used as-is.
    if callable(center):
        location = np.apply_over_axes(center, data, axis)
    else:
        location = center
    deviations = np.fabs(data - location)
    return np.median(deviations / c, axis=axis)
def stand_mad(a, c=Gaussian.ppf(3/4.), axis=0):
    """Deprecated alias for :func:`mad`; emits a FutureWarning and will be
    removed in 0.7.0."""
    from warnings import warn
    warn("stand_mad is deprecated and will be removed in 0.7.0. Use mad "
         "instead.", FutureWarning)
    return mad(a, c=c, axis=axis)
class Huber(object):
    """
    Huber's proposal 2 for estimating location and scale jointly.

    Parameters
    ----------
    c : float, optional
        Threshold used in threshold for chi=psi**2. Default value is 1.5.
    tol : float, optional
        Tolerance for convergence. Default value is 1e-08.
    maxiter : int, optional
        Maximum number of iterations. Default value is 30.
    norm : statsmodels.robust.norms.RobustNorm, optional
        A robust norm used in M estimator of location. If None,
        the location estimator defaults to a one-step
        fixed point version of the M-estimator using Huber's T.

    call
        Return joint estimates of Huber's scale and location.

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
    ...        3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,
    ...        3.77, 5.28, 28.95])
    >>> sm.robust.scale.huber(chem_data)
    (array(3.2054980819923693), array(0.67365260010478967))
    """

    def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None):
        self.c = c
        self.maxiter = maxiter
        self.tol = tol
        self.norm = norm
        # Consistency factor so the scale estimate is Fisher-consistent at the
        # normal distribution (see Huber's proposal 2).
        tmp = 2 * Gaussian.cdf(c) - 1
        self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c)

    def __call__(self, a, mu=None, initscale=None, axis=0):
        """
        Compute Huber's proposal 2 estimate of scale, using an optional
        initial value of scale and an optional estimate of mu. If mu
        is supplied, it is not reestimated.

        Parameters
        ----------
        a : array
            1d array
        mu : float or None, optional
            If the location mu is supplied then it is not reestimated.
            Default is None, which means that it is estimated.
        initscale : float or None, optional
            A first guess on scale.  If initscale is None then the standardized
            median absolute deviation of a is used.

        Notes
        -----
        `Huber` minimizes the function

        sum(psi((a[i]-mu)/scale)**2)

        as a function of (mu, scale), where

        psi(x) = np.clip(x, -self.c, self.c)
        """
        a = np.asarray(a)
        if mu is None:
            # One degree of freedom is used up by estimating the location.
            n = a.shape[0] - 1
            mu = np.median(a, axis=axis)
            est_mu = True
        else:
            n = a.shape[0]
            est_mu = False

        if initscale is None:
            scale = mad(a, axis=axis)
        else:
            scale = initscale
        scale = tools.unsqueeze(scale, axis, a.shape)
        mu = tools.unsqueeze(mu, axis, a.shape)
        return self._estimate_both(a, scale, mu, axis, est_mu, n)

    def _estimate_both(self, a, scale, mu, axis, est_mu, n):
        """
        Estimate scale and location simultaneously with the following
        pseudo_loop:

        while not_converged:
            mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)

        where estimate_location is an M-estimator and estimate_scale implements
        the check used in Section 5.5 of Venables & Ripley
        """
        for _ in range(self.maxiter):
            # Estimate the mean along a given axis
            if est_mu:
                if self.norm is None:
                    # One-step fixed-point estimator, equivalent to (and faster
                    # than) using norms.HuberT for the location update.
                    nmu = np.clip(a, mu - self.c * scale,
                                  mu + self.c * scale).sum(axis) / a.shape[axis]
                else:
                    nmu = norms.estimate_location(a, scale, self.norm, axis, mu,
                                                  self.maxiter, self.tol)
            else:
                # Location was supplied by the caller; do not re-estimate it.
                nmu = mu.squeeze()
            nmu = tools.unsqueeze(nmu, axis, a.shape)

            # Observations within the Huber threshold of the current location.
            subset = np.less_equal(np.fabs((a - mu) / scale), self.c)
            card = subset.sum(axis)

            nscale = np.sqrt(np.sum(subset * (a - nmu)**2, axis)
                             / (n * self.gamma - (a.shape[axis] - card) * self.c**2))
            nscale = tools.unsqueeze(nscale, axis, a.shape)

            # np.alltrue was deprecated in NumPy 1.25 and removed in NumPy 2.0;
            # np.all is the supported equivalent.
            test1 = np.all(np.less_equal(np.fabs(scale - nscale),
                                         nscale * self.tol))
            test2 = np.all(np.less_equal(np.fabs(mu - nmu), nscale * self.tol))
            if not (test1 and test2):
                mu = nmu
                scale = nscale
            else:
                return nmu.squeeze(), nscale.squeeze()
        raise ValueError('joint estimation of location and scale failed to converge in %d iterations' % self.maxiter)

huber = Huber()
class HuberScale(object):
    """
    Huber's scaling for fitting robust linear models.

    Huber's scale is intended to be used as the scale estimate in the
    IRLS algorithm and is slightly different than the `Huber` class.

    Parameters
    ----------
    d : float, optional
        d is the tuning constant for Huber's scale. Default is 2.5
    tol : float, optional
        The convergence tolerance
    maxiter : int, optional
        The maximum number of iterations. The default is 30.

    Methods
    -------
    call
        Return's Huber's scale computed as below

    Notes
    --------
    Huber's scale is the iterative solution to

    scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2

    where the Huber function is

    chi(x) = (x**2)/2 for \|x\| < d
    chi(x) = (d**2)/2 for \|x\| >= d

    and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\
            scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2)
    """
    def __init__(self, d=2.5, tol=1e-08, maxiter=30):
        self.d = d
        self.tol = tol
        self.maxiter = maxiter
    def __call__(self, df_resid, nobs, resid):
        # h is the Huber consistency constant described in the class Notes.
        h = (df_resid)/nobs*(self.d**2 + (1-self.d**2)*\
                Gaussian.cdf(self.d)-.5 - self.d/(np.sqrt(2*np.pi))*\
                np.exp(-.5*self.d**2))
        # Initial scale guess: standardized median absolute deviation.
        s = mad(resid)
        # subset(x): mask of residuals inside the threshold d at scale x;
        # chi implements the piecewise Huber function from the class Notes.
        subset = lambda x: np.less(np.fabs(resid/x),self.d)
        chi = lambda s: subset(s)*(resid/s)**2/2+(1-subset(s))*(self.d**2/2)
        # History of scale iterates; np.inf seeds the first convergence test.
        scalehist = [np.inf,s]
        niter = 1
        # Iterate the fixed-point update until successive scales agree to tol
        # or maxiter is reached (the final iterate is returned either way).
        while (np.abs(scalehist[niter-1] - scalehist[niter])>self.tol \
                and niter < self.maxiter):
            nscale = np.sqrt(1/(nobs*h)*np.sum(chi(scalehist[-1]))*\
                    scalehist[-1]**2)
            scalehist.append(nscale)
            niter += 1
            #if niter == self.maxiter:
            #    raise ValueError("Huber's scale failed to converge")
        return scalehist[-1]
hubers_scale = HuberScale()
| 34.35124 | 117 | 0.56562 |
01adb0ec1da45e87e33c7e699b3d497cd08b79ab | 2,062 | py | Python | release/stubs.min/System/__init___parts/OperationCanceledException.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/__init___parts/OperationCanceledException.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/__init___parts/OperationCanceledException.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class OperationCanceledException(SystemException, ISerializable, _Exception):
"""
The exception that is thrown in a thread upon cancellation of an operation that the thread was executing.
OperationCanceledException()
OperationCanceledException(message: str)
OperationCanceledException(message: str,innerException: Exception)
OperationCanceledException(token: CancellationToken)
OperationCanceledException(message: str,token: CancellationToken)
OperationCanceledException(message: str,innerException: Exception,token: CancellationToken)
"""
def add_SerializeObjectState(self, *args):
""" add_SerializeObjectState(self: Exception,value: EventHandler[SafeSerializationEventArgs]) """
pass
def remove_SerializeObjectState(self, *args):
""" remove_SerializeObjectState(self: Exception,value: EventHandler[SafeSerializationEventArgs]) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, *__args):
"""
__new__(cls: type)
__new__(cls: type,message: str)
__new__(cls: type,message: str,innerException: Exception)
__new__(cls: type,token: CancellationToken)
__new__(cls: type,message: str,token: CancellationToken)
__new__(cls: type,message: str,innerException: Exception,token: CancellationToken)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
CancellationToken = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a token associated with the operation that was canceled.
Get: CancellationToken(self: OperationCanceledException) -> CancellationToken
"""
| 29.884058 | 221 | 0.707081 |
b4aba398872a37900a33e44d17f8ed89e00bb701 | 1,855 | py | Python | pli/misc/namespace.py | flynx/pli | 805234daba05d21698123883fabf4a877f2af8c1 | [
"BSD-3-Clause"
] | 1 | 2016-05-08T06:29:25.000Z | 2016-05-08T06:29:25.000Z | pli/misc/namespace.py | flynx/pli | 805234daba05d21698123883fabf4a877f2af8c1 | [
"BSD-3-Clause"
] | null | null | null | pli/misc/namespace.py | flynx/pli | 805234daba05d21698123883fabf4a877f2af8c1 | [
"BSD-3-Clause"
] | null | null | null | #=======================================================================
__version__ = '''0.0.01'''
__sub_version__ = '''20040531024624'''
__copyright__ = '''(c) Alex A. Naanou 2003'''
#-----------------------------------------------------------------------
import sys
import pli.logictypes as logictypes
#-----------------------------------------------------------------------
#-----------------------------------------------------------NameSpace---
class NameSpace(logictypes.DictUnion):
    '''
    A union of scope dicts; member 0 is treated as the local (topmost) scope.
    '''
    def __getitem__(self, name):
        '''
        Look up a name in the union; an int index instead returns that
        whole member scope dict.
        '''
        if type(name) is int:
            # return the scope...
            return self._members[name]
        return super(NameSpace, self).__getitem__(name)
    def __setitem__(self, name, value):
        '''
        this will set a variable to the topmost scope.
        '''
        # Python 2 module: names may be byte strings or unicode.
        if type(name) not in (str, unicode):
            raise TypeError, 'a name must be of either str or unicode type (got %s).' % type(name)
        self._members[0][name] = value
    def locals(self):
        '''
        this will return the locals dict.
        NOTE: this is live; e.g. changing the return will change the locals.
        '''
        return self._members[0]
    ##!!! fix this...
    def globals(self):
        '''
        this is python compatible globals method.
        NOTE: this is live; e.g. changing the return will change the locals.
        '''
        # NOTE(review): unlike locals(), this builds a NEW DictUnion each call,
        # so writes to the returned object do not propagate back to the member
        # scopes -- presumably what the "fix this" marker above refers to; confirm.
        return logictypes.DictUnion(*self._members[1:])
#-----------------------------------------------------------------------
#-----------------------------------------------------------namespace---
def namespace():
    '''
    Return a NameSpace union built from the locals of every frame above
    this call (innermost caller first).
    '''
    result = NameSpace()
    depth = 1
    # Walk up the call stack until sys._getframe raises ValueError
    # (i.e. we have run out of frames).
    try:
        while 1:
            result.tailunite(sys._getframe(depth).f_locals)
            depth += 1
    except ValueError:
        pass
    return result
#=======================================================================
# vim:set ts=4 sw=4 nowrap :
| 25.410959 | 89 | 0.489488 |
07c1acb54a02e4e29a521b4a6917e6635c8b35aa | 5,071 | py | Python | src/fhir_types/FHIR_Person.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | 2 | 2022-02-03T00:51:30.000Z | 2022-02-03T18:42:43.000Z | src/fhir_types/FHIR_Person.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | src/fhir_types/FHIR_Person.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Literal, TypedDict
from .FHIR_Address import FHIR_Address
from .FHIR_Attachment import FHIR_Attachment
from .FHIR_boolean import FHIR_boolean
from .FHIR_code import FHIR_code
from .FHIR_ContactPoint import FHIR_ContactPoint
from .FHIR_date import FHIR_date
from .FHIR_Element import FHIR_Element
from .FHIR_HumanName import FHIR_HumanName
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Meta import FHIR_Meta
from .FHIR_Narrative import FHIR_Narrative
from .FHIR_Person_Link import FHIR_Person_Link
from .FHIR_Reference import FHIR_Reference
from .FHIR_uri import FHIR_uri
# Demographics and administrative information about a person independent of a specific health-related context.
# Functional TypedDict syntax is required because many keys ("_gender", "_birthDate", ...)
# are not valid Python identifiers; total=False makes every key optional.
FHIR_Person = TypedDict(
    "FHIR_Person",
    {
        # This is a Person resource
        "resourceType": Literal["Person"],
        # The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes.
        "id": FHIR_id,
        # The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
        "meta": FHIR_Meta,
        # A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc.
        "implicitRules": FHIR_uri,
        # Extensions for implicitRules
        "_implicitRules": FHIR_Element,
        # The base language in which the resource is written.
        "language": FHIR_code,
        # Extensions for language
        "_language": FHIR_Element,
        # A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety.
        "text": FHIR_Narrative,
        # These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope.
        "contained": List[Any],
        # May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance  applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
        "extension": List[Any],
        # May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
        "modifierExtension": List[Any],
        # Identifier for a person within a particular scope.
        "identifier": List[FHIR_Identifier],
        # A name associated with the person.
        "name": List[FHIR_HumanName],
        # A contact detail for the person, e.g. a telephone number or an email address.
        "telecom": List[FHIR_ContactPoint],
        # Administrative Gender.
        "gender": Literal["male", "female", "other", "unknown"],
        # Extensions for gender
        "_gender": FHIR_Element,
        # The birth date for the person.
        "birthDate": FHIR_date,
        # Extensions for birthDate
        "_birthDate": FHIR_Element,
        # One or more addresses for the person.
        "address": List[FHIR_Address],
        # An image that can be displayed as a thumbnail of the person to enhance the identification of the individual.
        "photo": FHIR_Attachment,
        # The organization that is the custodian of the person record.
        "managingOrganization": FHIR_Reference,
        # Whether this person's record is in active use.
        "active": FHIR_boolean,
        # Extensions for active
        "_active": FHIR_Element,
        # Link to a resource that concerns the same actual person.
        "link": List[FHIR_Person_Link],
    },
    total=False,
)
| 68.527027 | 839 | 0.741668 |
7e56ff47f62c5773840fd89342f2831fa7d1530b | 2,480 | py | Python | scripts/gauss/blast_ex1.py | biobureaubiotech/blast_bench | 5c33d4b71dffde9176f41e1f8378d3e994a5e782 | [
"BSD-3-Clause"
] | null | null | null | scripts/gauss/blast_ex1.py | biobureaubiotech/blast_bench | 5c33d4b71dffde9176f41e1f8378d3e994a5e782 | [
"BSD-3-Clause"
] | null | null | null | scripts/gauss/blast_ex1.py | biobureaubiotech/blast_bench | 5c33d4b71dffde9176f41e1f8378d3e994a5e782 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run one BLAST benchmark (a single size fraction of the original input file)
while a background thread samples CPU and memory usage into monitor_<size>.log.
"""
from threading import Thread, Event
import time
import psutil
import datetime
import os
import subprocess

# Fraction of the original 'good' input file used for this run.
# Other fractions benchmarked elsewhere: 0.03125, 0.0625, 0.125, 0.25, 0.5
size = 0.015625

file_prefix = str(size).replace('.', '_')
output = open("monitor_%s.log" % (file_prefix), "w")


def monitor_resources(interval, stop_event):
    """Append a DATE/CPU/MEMORY sample line to the log until stop_event is set."""
    while not stop_event.is_set():
        # cpu_percent(interval=5) blocks for ~5s, which paces the sampling loop
        cpu = psutil.cpu_percent(interval=5)
        mem = psutil.virtual_memory()
        used = mem.total - mem.available
        sample = [
            "DATE:" + str(datetime.datetime.now()),
            "CPU:" + str(cpu),
            "MEMORY:" + str(int(used / 1024 / 1024)) + " MB",
        ]
        output.write("\t".join(sample) + "\n")
        print(sample)


# Fix: the monitor function used to be shadowed by the Thread object bound to
# the same name; use distinct names for the function, the event and the thread.
stop_monitor = Event()
monitor_thread = Thread(target=monitor_resources, args=(2, stop_monitor))
monitor_thread.start()

try:
    print("Running BLAST for %s \n" % (size))
    output.write("Running BLAST for %s \n" % (size))
    filename = "input_blast_%s.fasta" % (file_prefix)
    command = "time /dados/raonygui/programs/ncbi-blast-2.3.0+/bin/blastn -db /dados/raonygui/blast/nt -evalue 1e-05 -query /dados/raonygui/blast_bench/input/%s -out blast_output_%s.fasta -num_threads 24" % (filename, file_prefix)
    # check_output() returns bytes: decode before writing to the text-mode log.
    # (The original passed the bytes object to writelines(), a TypeError on Python 3.)
    out = subprocess.check_output(command.split())
    blast_report = out.decode("utf-8")
    print(blast_report)
    output.write(blast_report)
finally:
    # Always stop the monitor and close the log, even if BLAST fails --
    # otherwise the non-daemon monitor thread would keep the process alive.
    stop_monitor.set()
    monitor_thread.join()
    output.close()
| 31.392405 | 226 | 0.693548 |
cb9930ae29d3273a2eddcbc533fe67c17c574dfc | 13,128 | py | Python | netbox/extras/forms.py | LambdaDriver/netbox | 1ea368856bbce71a97ee2deef8e91a29f348d2f3 | [
"Apache-2.0"
] | null | null | null | netbox/extras/forms.py | LambdaDriver/netbox | 1ea368856bbce71a97ee2deef8e91a29f348d2f3 | [
"Apache-2.0"
] | 2 | 2021-06-08T23:01:37.000Z | 2021-09-08T02:56:32.000Z | netbox/extras/forms.py | Dave-Snigier/netbox | 1ea368856bbce71a97ee2deef8e91a29f348d2f3 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from mptt.forms import TreeNodeMultipleChoiceField
from taggit.forms import TagField as TagField_
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect, ColorSelect,
ContentTypeSelect, CSVModelForm, DateTimePicker, DynamicModelMultipleChoiceField, JSONField, SlugField,
StaticSelect2, StaticSelect2Multiple, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .models import ConfigContext, CustomField, CustomFieldValue, ImageAttachment, ObjectChange, Tag
#
# Custom fields
#
class CustomFieldModelForm(forms.ModelForm):
    """
    ModelForm mixin that adds a cf_<name> form field for every CustomField
    defined for the model, and persists their values alongside the instance.
    """

    def __init__(self, *args, **kwargs):

        self.obj_type = ContentType.objects.get_for_model(self._meta.model)
        self.custom_fields = []
        self.custom_field_values = {}

        super().__init__(*args, **kwargs)

        self._append_customfield_fields()

    def _append_customfield_fields(self):
        """
        Append form fields for all CustomFields assigned to this model.
        """
        # Retrieve initial CustomField values for the instance
        if self.instance.pk:
            for cfv in CustomFieldValue.objects.filter(
                obj_type=self.obj_type,
                obj_id=self.instance.pk
            ).prefetch_related('field'):
                self.custom_field_values[cfv.field.name] = cfv.serialized_value

        # Append form fields; assign initial values if modifying and existing object
        for cf in CustomField.objects.filter(obj_type=self.obj_type):
            field_name = 'cf_{}'.format(cf.name)
            if self.instance.pk:
                self.fields[field_name] = cf.to_form_field(set_initial=False)
                self.fields[field_name].initial = self.custom_field_values.get(cf.name)
            else:
                self.fields[field_name] = cf.to_form_field()

            # Annotate the field in the list of CustomField form fields
            self.custom_fields.append(field_name)

    def _save_custom_fields(self):
        # Persist each cf_* value, creating the CustomFieldValue row on first
        # save; the form field carries its CustomField as a `.model` attribute.
        for field_name in self.custom_fields:
            try:
                cfv = CustomFieldValue.objects.prefetch_related('field').get(
                    field=self.fields[field_name].model,
                    obj_type=self.obj_type,
                    obj_id=self.instance.pk
                )
            except CustomFieldValue.DoesNotExist:
                # Skip this field if none exists already and its value is empty
                if self.cleaned_data[field_name] in [None, '']:
                    continue
                cfv = CustomFieldValue(
                    field=self.fields[field_name].model,
                    obj_type=self.obj_type,
                    obj_id=self.instance.pk
                )
            cfv.value = self.cleaned_data[field_name]
            cfv.save()

    def save(self, commit=True):
        obj = super().save(commit)

        # Handle custom fields the same way we do M2M fields
        if commit:
            self._save_custom_fields()
        else:
            # Deferred save: the caller invokes save_custom_fields() later,
            # mirroring Django's save_m2m() pattern.
            self.save_custom_fields = self._save_custom_fields

        return obj
class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm):
    """CSV import variant: builds custom-field form fields in CSV-import mode."""

    def _append_customfield_fields(self):
        """Add a CSV-import form field for every CustomField on this model."""
        for custom_field in CustomField.objects.filter(obj_type=self.obj_type):
            name = 'cf_{}'.format(custom_field.name)
            # Register the field and record it as a custom-field form field
            self.fields[name] = custom_field.to_form_field(for_csv_import=True)
            self.custom_fields.append(name)
class CustomFieldBulkEditForm(BulkEditForm):
    """Bulk-edit form exposing every CustomField of the model as a form field."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.custom_fields = []
        self.obj_type = ContentType.objects.get_for_model(self.model)

        # Add all applicable CustomFields to the form
        for custom_field in CustomField.objects.filter(obj_type=self.obj_type):
            # Non-required custom fields may be nulled out via the bulk edit
            if not custom_field.required:
                self.nullable_fields.append(custom_field.name)
            self.fields[custom_field.name] = custom_field.to_form_field(
                set_initial=False, enforce_required=False
            )
            # Record the name so views can distinguish custom fields
            self.custom_fields.append(custom_field.name)
class CustomFieldFilterForm(forms.Form):
    """Filter form that adds a cf_* field for each filterable CustomField."""

    def __init__(self, *args, **kwargs):
        # obj_type must be resolved before Form.__init__ runs
        self.obj_type = ContentType.objects.get_for_model(self.model)

        super().__init__(*args, **kwargs)

        # Add all applicable CustomFields (filter-disabled fields excluded)
        filterable = CustomField.objects.filter(obj_type=self.obj_type).exclude(
            filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED
        )
        for custom_field in filterable:
            self.fields['cf_{}'.format(custom_field.name)] = custom_field.to_form_field(
                set_initial=True, enforce_required=False
            )
#
# Tags
#
class TagField(TagField_):
    """taggit TagField that renders with the API-backed tag selection widget."""

    def widget_attrs(self, widget):
        # The "tagfield" CSS class triggers the special API-based tag selector
        attrs = {'class': 'tagfield'}
        return attrs
class TagForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Tag; slug is auto-populated from the name."""
    slug = SlugField()

    class Meta:
        model = Tag
        fields = [
            'name', 'slug', 'color', 'description'
        ]
class AddRemoveTagsForm(forms.Form):
    """Mixin form providing add_tags/remove_tags fields for bulk tag editing."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Added dynamically so each form instance gets its own TagField
        for tag_field_name in ('add_tags', 'remove_tags'):
            self.fields[tag_field_name] = TagField(required=False)
class TagFilterForm(BootstrapMixin, forms.Form):
    """Free-text search filter for the Tag list view."""
    model = Tag
    q = forms.CharField(
        required=False,
        label='Search'
    )
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk edit of selected Tags (color/description; description is nullable)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    color = forms.CharField(
        max_length=6,
        required=False,
        widget=ColorSelect()
    )
    description = forms.CharField(
        max_length=200,
        required=False
    )

    class Meta:
        nullable_fields = ['description']
#
# Config contexts
#
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
    """
    Create/edit form for a ConfigContext: scope assignments (regions, sites,
    roles, ...) plus the JSON data payload.
    """
    regions = TreeNodeMultipleChoiceField(
        queryset=Region.objects.all(),
        required=False,
        widget=StaticSelect2Multiple()
    )
    sites = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        required=False
    )
    roles = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        required=False
    )
    platforms = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        required=False
    )
    cluster_groups = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False
    )
    clusters = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False
    )
    tenant_groups = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        required=False
    )
    tenants = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    # The JSON payload; rendered without a label
    data = JSONField(
        label=''
    )

    class Meta:
        model = ConfigContext
        fields = (
            'name', 'weight', 'description', 'is_active', 'regions', 'sites', 'roles', 'platforms', 'cluster_groups',
            'clusters', 'tenant_groups', 'tenants', 'tags', 'data',
        )
class ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk edit of selected ConfigContexts (weight/is_active/description)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConfigContext.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    weight = forms.IntegerField(
        required=False,
        min_value=0
    )
    is_active = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect()
    )
    description = forms.CharField(
        required=False,
        max_length=100
    )

    class Meta:
        nullable_fields = [
            'description',
        ]
class ConfigContextFilterForm(BootstrapMixin, forms.Form):
    """
    Filter form for the ConfigContext list view; scope filters match the
    FilterSet by slug (cluster_id matches by primary key).
    """
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    role = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    platform = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    cluster_group = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    # Clusters have no slug, so this filter matches on primary key
    cluster_id = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False,
        label='Cluster'
    )
    tenant_group = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    tenant = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
    tag = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        to_field_name='slug',
        required=False,
        widget=APISelectMultiple(
            value_field="slug",
        )
    )
#
# Filter form for local config context data
#
class LocalConfigContextFilterForm(forms.Form):
    """Tri-state (yes/no/any) filter on presence of local config context data."""
    local_context_data = forms.NullBooleanField(
        required=False,
        label='Has local config context data',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
#
# Image attachments
#
class ImageAttachmentForm(BootstrapMixin, forms.ModelForm):
    """Upload form for an image attached to an object (name + image file)."""

    class Meta:
        model = ImageAttachment
        fields = [
            'name', 'image',
        ]
#
# Change logging
#
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
    """Filters for the change log: time window, action, user, and object type."""
    model = ObjectChange
    q = forms.CharField(
        required=False,
        label='Search'
    )
    time_after = forms.DateTimeField(
        label='After',
        required=False,
        widget=DateTimePicker()
    )
    time_before = forms.DateTimeField(
        label='Before',
        required=False,
        widget=DateTimePicker()
    )
    action = forms.ChoiceField(
        choices=add_blank_choice(ObjectChangeActionChoices),
        required=False,
        widget=StaticSelect2()
    )
    # TODO: Convert to DynamicModelMultipleChoiceField once we have an API endpoint for users
    user = forms.ModelChoiceField(
        queryset=User.objects.order_by('username'),
        required=False,
        widget=StaticSelect2()
    )
    changed_object_type = forms.ModelChoiceField(
        queryset=ContentType.objects.order_by('model'),
        required=False,
        widget=ContentTypeSelect(),
        label='Object Type'
    )
#
# Scripts
#
class ScriptForm(BootstrapMixin, forms.Form):
    """
    Form generated from a custom script's declared variables, plus a trailing
    _commit checkbox controlling whether changes are saved or dry-run.
    """
    _commit = forms.BooleanField(
        required=False,
        initial=True,
        label="Commit changes",
        help_text="Commit changes to the database (uncheck for a dry-run)"
    )

    def __init__(self, vars, *args, commit_default=True, **kwargs):

        # Expose each script variable as a form field before initializing
        for var_name, var in vars.items():
            self.base_fields[var_name] = var.as_field()

        super().__init__(*args, **kwargs)

        if not commit_default:
            # Default the commit checkbox to unchecked (dry-run)
            self.fields['_commit'].initial = False

        # Re-insert _commit so it renders after the dynamically added fields
        self.fields['_commit'] = self.fields.pop('_commit')

    @property
    def requires_input(self):
        """Whether the form has any fields beyond the _commit checkbox."""
        return len(self.fields) > 1
| 28.852747 | 117 | 0.635664 |
6d0fcf85354ff9b70ea5673e84b7a1261e247184 | 9,470 | py | Python | trpg/RPGCheck.py | kwoolter/TRPGFramework | 758bd497907f40b7a6ffd0b1937fb802ed3074bc | [
"CNRI-Python"
] | 5 | 2017-04-14T11:18:58.000Z | 2021-10-14T02:34:42.000Z | trpg/RPGCheck.py | kwoolter/TRPGFramework | 758bd497907f40b7a6ffd0b1937fb802ed3074bc | [
"CNRI-Python"
] | null | null | null | trpg/RPGCheck.py | kwoolter/TRPGFramework | 758bd497907f40b7a6ffd0b1937fb802ed3074bc | [
"CNRI-Python"
] | null | null | null | __author__ = 'KeithW'
from .RPGCharacter import *
import random
class RPGCheck(object):
# Constants for storing a player's state of a check
NOT_ATTEMPTED = 0
FAILED = -1
SUCCEEDED = 1
SILENT_REWARD = "***"
STAT_NOT_FOUND_DEFAULT = -19680926
    # Construct a basic check
    # A name, optional description and optional completion message
    def __init__(self, name : str, type : str, description : str = "", completion_msg : str=""):
        """
        Create a named check.

        :param name: name of the check (also used as the tracking stat name)
        :param type: free-form category/type label for the check
        :param description: optional human-readable description
        :param completion_msg: optional message associated with completion
        """
        self.name = name
        self.type = type
        self.description = description
        self.completion_msg = completion_msg
        # Stats rewarded on success, required before attempting, and tested on attempt
        self._rewards = []
        self._prerequisites = []
        self._checks = []
        # True once the check has been passed and rewards can be collected
        self.is_rewarded = False
# Convert a check to a str
def __str__(self):
text = "%s:%s:%s" % (self.name, self.type, self.description)
# text += (" - pre-reqs(%i), checks(%i), rewards(%i)" % (len(self._prerequisites), len(self._checks), len(self._rewards)))
return text
    # Add a reward stat if you succeed the check
    def add_reward(self, new_reward : BaseStat):
        self._rewards.append(new_reward)
# Return the list of rewards only if they have been earned!
def get_rewards(self):
if self.is_rewarded == True:
return self._rewards.copy()
else:
return set()
    # Add pre-requisite stat that must be met before you can attempt the check
    def add_pre_requisite(self, new_prerequisite : BaseStat):
        self._prerequisites.append(new_prerequisite)
    # Add a check stat that is made when you attempt this challenge
    # Optional randomiser parameter to randomly increase the provided stat
    def add_check(self, new_check : BaseStat, randomiser : int = 0):
        # NOTE: mutates the caller's stat object in place, adding a random
        # 0..randomiser bonus to its value before it is stored
        new_check.value += random.randint(0, randomiser )
        self._checks.append(new_check)
    # See if the check is available based on a character's current stats
    def is_available(self, character : Character):
        """
        Return True if `character` satisfies every pre-requisite stat of this
        check, comparing each pre-requisite against the player's equivalent
        stat (or a sentinel value if the player lacks the stat).
        """
        available = True

        # Loop through all of the check's prerequisite stats
        for stat in self._prerequisites:

            # If the stat has no scope defined then default is that the stat belongs to the Character
            if hasattr(stat,"scope") is False:
                stat.scope = False

            # Get what the player's equivalent stat is
            player_stat = character.get_stat(stat.name, global_stat=stat.scope)

            # If the player does not have this stat then use a special default value
            # This is because some pre-reqs might require the absence of a stat
            if player_stat is None:
                player_stat = BaseStat(stat.name, "AUTO",RPGCheck.STAT_NOT_FOUND_DEFAULT)
                logging.info("%s.is_available():Check %s: Player %s does not have stat %s", \
                             __class__, self.name, character.name, stat.name)

            #compare the prerequisite vs player_stat
            logging.info("%s.is_available():Check %s: Player %s %s going to compare - %i vs %i", \
                         __class__, self.name, character.name, stat.name, \
                         player_stat.value, stat.value)

            # compare() is not defined in this excerpt -- presumably a stat
            # comparison helper elsewhere on this class; confirm its semantics.
            available = self.compare(player_stat, stat)

            # One failed pre-requisite makes the whole check unavailable
            if available is False:
                logging.info("%s.is_available():Check %s: Player %s %s FAILED compare - %i vs %i", \
                             __class__, self.name, character.name, stat.name, \
                             player_stat.value, stat.value)
                break

        logging.info("%s.is_available: Check %s available = %r", __class__, self.name, available)

        return available
# Check if a check has been completed based on the current player stats
def is_completed(self, character : Character):
completed = True
# Attempt to get this challenge stat from the player's stats
stat = character.get_stat(self.name)
# If we couldn't find it or it was marked as anything but succeeded the flag as incomplete
if stat is None or stat.value != RPGCheck.SUCCEEDED:
completed = False
else:
logging.info("Check %s completed %s",stat.name, stat.update_time.strftime("%Y-%m-%d %H:%M"))
return completed
# Attempt a check
def attempt(self, character : Character):
# Assume we are going to succeed
succeed = True
self.is_rewarded = False
# See if there is a stat for this challenge already
stat = character.get_stat(self.name)
# If not then create one and add it to the supplied engine
if stat is None:
stat = CoreStat(self.name,self.type,RPGCheck.NOT_ATTEMPTED)
character.add_stat(stat)
# If we have already successfully completed the check then we are done
if stat.value == RPGCheck.SUCCEEDED:
logging.info("%s.attempt(). Check %s already completed.", __class__, self.name)
return succeed
# Else check that the player meets the prerequisites...
elif self.is_available(character) == True:
# Now compare the check stats with those of the player
for check_stat in self._checks:
compare_stat = character.get_stat(check_stat.name, global_stat=check_stat.scope)
# If the player does not have this stat then use a special default value
# This is because some checks might require the absence of a stat
if compare_stat is None:
compare_stat = BaseStat(check_stat.name, "AUTO",RPGCheck.STAT_NOT_FOUND_DEFAULT)
logging.info("%s.attempt(): Player stat not found %s %s.", \
__class__, self.name, check_stat.name)
# raise Exception(check_stat.failure_msg)
logging.info("%s.attempt(): Check %s checking %s %i vs player %i", \
__class__, self.name, check_stat.name, check_stat.value, compare_stat.value)
# Make the check comparison of the check stat vs. the player's stat
# and raise an exception if the check fails
if self.compare(compare_stat, check_stat) is False:
logging.info("%s.attempt(): Check %s %s failed", \
__class__, self.name, check_stat.name)
raise Exception(check_stat.failure_msg)
# OK, we have been through the check stats, now see if we succeeded...
if succeed == True:
# Set the flag to indicate we succeeded the challenge
character.update_stat(self.name, RPGCheck.SUCCEEDED)
logging.info("%s.attempt(): Check %s completed", __class__, self.name)
#.. and that we are going to get rewarded...
self.is_rewarded = True
# and dish out the rewards...
for stat in self._rewards:
current_stat = character.get_stat(stat.name, global_stat=stat.scope)
if current_stat is not None:
if stat.operator is None or stat.operator == "eq":
character.update_stat(stat.name, stat.value, global_stat=stat.scope)
else:
character.increment_stat(stat.name, stat.value, global_stat=stat.scope)
else:
new_stat = CoreStat(stat.name,"Reward", stat.value)
character.add_stat(new_stat, global_stat=stat.scope)
logging.info("%s.attempt(): Reward %s +%i", __class__, stat.name, stat.value)
# Challenge pre-requisites not met - attempt FAIL
else:
succeed = False
logging.info("%s.attempt(): Challenge %s failed as player did not meet prerequisites", __class__, self.name)
# After all see if we failed the attempt and log this
if succeed == False:
character.update_stat(self.name, RPGCheck.FAILED)
return succeed
def compare(self, stat1, stat2):
result = False
text = ""
if hasattr(stat2, "comparator"):
comparator = stat2.comparator
else:
comparator = "gte"
if comparator == "eq":
result = (stat1.value == stat2.value)
if result is False:
text = "Your %s needs to be equal to %i." % (stat1.name, stat2.value)
elif comparator == "neq":
result = (stat1.value != stat2.value)
if result is False:
text = "Your %s needs to be not equal to %i." % (stat1.name, stat2.value)
elif comparator == "gte":
result = (stat1.value >= stat2.value)
if result is False:
text = "Your %s is too low. Needs to be higher than %i." % (stat1.name, stat2.value)
elif comparator == "lte":
result = (stat1.value <= stat2.value)
if result is False:
text = "Your %s is too high. Needs to be lower than %i." % (stat1.name, stat2.value)
else:
result = (stat1.value >= stat2.value)
if result is False and (hasattr(stat2, "failure_msg") is False or stat2.failure_msg is None):
stat2.failure_msg = text
logging.info("compare():'%s' compare - %i vs %i = %r", comparator,stat1.value, stat2.value, result)
return result | 41.173913 | 130 | 0.593136 |
4df7267c74465cba56a6fc27d359812cdb5be33d | 25,217 | py | Python | allenact/embodiedai/models/basic_models.py | brandontrabucco/allenact | 0f323ac6f67a84a9de76359f5506c44eff64e0a1 | [
"MIT"
] | 187 | 2020-08-28T16:59:41.000Z | 2022-03-27T19:10:11.000Z | allenact/embodiedai/models/basic_models.py | brandontrabucco/allenact | 0f323ac6f67a84a9de76359f5506c44eff64e0a1 | [
"MIT"
] | 120 | 2020-08-28T15:30:36.000Z | 2022-03-13T00:38:44.000Z | allenact/embodiedai/models/basic_models.py | brandontrabucco/allenact | 0f323ac6f67a84a9de76359f5506c44eff64e0a1 | [
"MIT"
] | 45 | 2020-08-28T18:30:04.000Z | 2022-03-29T11:13:28.000Z | """Basic building block torch networks that can be used across a variety of
tasks."""
from typing import (
Sequence,
Dict,
Union,
cast,
List,
Callable,
Optional,
Tuple,
Any,
)
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel, DistributionType
from allenact.base_abstractions.distributions import CategoricalDistr, Distr
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact.utils.model_utils import make_cnn, compute_cnn_output
from allenact.utils.system import get_logger
class SimpleCNN(nn.Module):
"""A Simple N-Conv CNN followed by a fully connected layer. Takes in
observations (of type gym.spaces.dict) and produces an embedding of the
`rgb_uuid` and/or `depth_uuid` components.
# Attributes
observation_space : The observation_space of the agent, should have `rgb_uuid` or `depth_uuid` as
a component (otherwise it is a blind model).
output_size : The size of the embedding vector to produce.
"""
def __init__(
self,
observation_space: SpaceDict,
output_size: int,
rgb_uuid: Optional[str],
depth_uuid: Optional[str],
layer_channels: Sequence[int] = (32, 64, 32),
kernel_sizes: Sequence[Tuple[int, int]] = ((8, 8), (4, 4), (3, 3)),
layers_stride: Sequence[Tuple[int, int]] = ((4, 4), (2, 2), (1, 1)),
paddings: Sequence[Tuple[int, int]] = ((0, 0), (0, 0), (0, 0)),
dilations: Sequence[Tuple[int, int]] = ((1, 1), (1, 1), (1, 1)),
flatten: bool = True,
output_relu: bool = True,
):
"""Initializer.
# Parameters
observation_space : See class attributes documentation.
output_size : See class attributes documentation.
"""
super().__init__()
self.rgb_uuid = rgb_uuid
if self.rgb_uuid is not None:
assert self.rgb_uuid in observation_space.spaces
self._n_input_rgb = observation_space.spaces[self.rgb_uuid].shape[2]
assert self._n_input_rgb >= 0
else:
self._n_input_rgb = 0
self.depth_uuid = depth_uuid
if self.depth_uuid is not None:
assert self.depth_uuid in observation_space.spaces
self._n_input_depth = observation_space.spaces[self.depth_uuid].shape[2]
assert self._n_input_depth >= 0
else:
self._n_input_depth = 0
if not self.is_blind:
# hyperparameters for layers
self._cnn_layers_channels = list(layer_channels)
self._cnn_layers_kernel_size = list(kernel_sizes)
self._cnn_layers_stride = list(layers_stride)
self._cnn_layers_paddings = list(paddings)
self._cnn_layers_dilations = list(dilations)
if self._n_input_rgb > 0:
input_rgb_cnn_dims = np.array(
observation_space.spaces[self.rgb_uuid].shape[:2], dtype=np.float32
)
self.rgb_cnn = self.make_cnn_from_params(
output_size=output_size,
input_dims=input_rgb_cnn_dims,
input_channels=self._n_input_rgb,
flatten=flatten,
output_relu=output_relu,
)
if self._n_input_depth > 0:
input_depth_cnn_dims = np.array(
observation_space.spaces[self.depth_uuid].shape[:2],
dtype=np.float32,
)
self.depth_cnn = self.make_cnn_from_params(
output_size=output_size,
input_dims=input_depth_cnn_dims,
input_channels=self._n_input_depth,
flatten=flatten,
output_relu=output_relu,
)
def make_cnn_from_params(
self,
output_size: int,
input_dims: np.ndarray,
input_channels: int,
flatten: bool,
output_relu: bool,
) -> nn.Module:
output_dims = input_dims
for kernel_size, stride, padding, dilation in zip(
self._cnn_layers_kernel_size,
self._cnn_layers_stride,
self._cnn_layers_paddings,
self._cnn_layers_dilations,
):
# noinspection PyUnboundLocalVariable
output_dims = self._conv_output_dim(
dimension=output_dims,
padding=np.array(padding, dtype=np.float32),
dilation=np.array(dilation, dtype=np.float32),
kernel_size=np.array(kernel_size, dtype=np.float32),
stride=np.array(stride, dtype=np.float32),
)
# noinspection PyUnboundLocalVariable
cnn = make_cnn(
input_channels=input_channels,
layer_channels=self._cnn_layers_channels,
kernel_sizes=self._cnn_layers_kernel_size,
strides=self._cnn_layers_stride,
paddings=self._cnn_layers_paddings,
dilations=self._cnn_layers_dilations,
output_height=output_dims[0],
output_width=output_dims[1],
output_channels=output_size,
flatten=flatten,
output_relu=output_relu,
)
self.layer_init(cnn)
return cnn
@staticmethod
def _conv_output_dim(
dimension: Sequence[int],
padding: Sequence[int],
dilation: Sequence[int],
kernel_size: Sequence[int],
stride: Sequence[int],
) -> Tuple[int, ...]:
"""Calculates the output height and width based on the input height and
width to the convolution layer. For parameter definitions see.
[here](https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d).
# Parameters
dimension : See above link.
padding : See above link.
dilation : See above link.
kernel_size : See above link.
stride : See above link.
"""
assert len(dimension) == 2
out_dimension = []
for i in range(len(dimension)):
out_dimension.append(
int(
np.floor(
(
(
dimension[i]
+ 2 * padding[i]
- dilation[i] * (kernel_size[i] - 1)
- 1
)
/ stride[i]
)
+ 1
)
)
)
return tuple(out_dimension)
@staticmethod
def layer_init(cnn) -> None:
"""Initialize layer parameters using Kaiming normal."""
for layer in cnn:
if isinstance(layer, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(layer.weight, nn.init.calculate_gain("relu"))
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
@property
def is_blind(self):
"""True if the observation space doesn't include `self.rgb_uuid` or
`self.depth_uuid`."""
return self._n_input_rgb + self._n_input_depth == 0
def forward(self, observations: Dict[str, torch.Tensor]): # type: ignore
if self.is_blind:
return None
def check_use_agent(new_setting):
if use_agent is not None:
assert (
use_agent is new_setting
), "rgb and depth must both use an agent dim or none"
return new_setting
cnn_output_list: List[torch.Tensor] = []
use_agent: Optional[bool] = None
if self.rgb_uuid is not None:
use_agent = check_use_agent(len(observations[self.rgb_uuid].shape) == 6)
cnn_output_list.append(
compute_cnn_output(self.rgb_cnn, observations[self.rgb_uuid])
)
if self.depth_uuid is not None:
use_agent = check_use_agent(len(observations[self.depth_uuid].shape) == 6)
cnn_output_list.append(
compute_cnn_output(self.depth_cnn, observations[self.depth_uuid])
)
if use_agent:
channels_dim = 3 # [step, sampler, agent, channel (, height, width)]
else:
channels_dim = 2 # [step, sampler, channel (, height, width)]
return torch.cat(cnn_output_list, dim=channels_dim)
class RNNStateEncoder(nn.Module):
"""A simple RNN-based model playing a role in many baseline embodied-
navigation agents.
See `seq_forward` for more details of how this model is used.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
rnn_type: str = "GRU",
trainable_masked_hidden_state: bool = False,
):
"""An RNN for encoding the state in RL. Supports masking the hidden
state during various timesteps in the forward lass.
# Parameters
input_size : The input size of the RNN.
hidden_size : The hidden size.
num_layers : The number of recurrent layers.
rnn_type : The RNN cell type. Must be GRU or LSTM.
trainable_masked_hidden_state : If `True` the initial hidden state (used at the start of a Task)
is trainable (as opposed to being a vector of zeros).
"""
super().__init__()
self._num_recurrent_layers = num_layers
self._rnn_type = rnn_type
self.rnn = getattr(torch.nn, rnn_type)(
input_size=input_size, hidden_size=hidden_size, num_layers=num_layers
)
self.trainable_masked_hidden_state = trainable_masked_hidden_state
if trainable_masked_hidden_state:
self.init_hidden_state = nn.Parameter(
0.1 * torch.randn((num_layers, 1, hidden_size)), requires_grad=True
)
self.layer_init()
def layer_init(self):
"""Initialize the RNN parameters in the model."""
for name, param in self.rnn.named_parameters():
if "weight" in name:
nn.init.orthogonal_(param)
elif "bias" in name:
nn.init.constant_(param, 0)
@property
def num_recurrent_layers(self) -> int:
"""The number of recurrent layers in the network."""
return self._num_recurrent_layers * (2 if "LSTM" in self._rnn_type else 1)
def _pack_hidden(
self, hidden_states: Union[torch.FloatTensor, Sequence[torch.FloatTensor]]
) -> torch.FloatTensor:
"""Stacks hidden states in an LSTM together (if using a GRU rather than
an LSTM this is just the identity).
# Parameters
hidden_states : The hidden states to (possibly) stack.
"""
if "LSTM" in self._rnn_type:
hidden_states = cast(
torch.FloatTensor,
torch.cat([hidden_states[0], hidden_states[1]], dim=0),
)
return cast(torch.FloatTensor, hidden_states)
def _unpack_hidden(
self, hidden_states: torch.FloatTensor
) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
"""Partial inverse of `_pack_hidden` (exact if there are 2 hidden
layers)."""
if "LSTM" in self._rnn_type:
new_hidden_states = (
hidden_states[0 : self._num_recurrent_layers],
hidden_states[self._num_recurrent_layers :],
)
return cast(Tuple[torch.FloatTensor, torch.FloatTensor], new_hidden_states)
return cast(torch.FloatTensor, hidden_states)
def _mask_hidden(
self,
hidden_states: Union[Tuple[torch.FloatTensor, ...], torch.FloatTensor],
masks: torch.FloatTensor,
) -> Union[Tuple[torch.FloatTensor, ...], torch.FloatTensor]:
"""Mask input hidden states given `masks`. Useful when masks represent
steps on which a task has completed.
# Parameters
hidden_states : The hidden states.
masks : Masks to apply to hidden states (see seq_forward).
# Returns
Masked hidden states. Here masked hidden states will be replaced with
either all zeros (if `trainable_masked_hidden_state` was False) and will
otherwise be a learnable collection of parameters.
"""
if not self.trainable_masked_hidden_state:
if isinstance(hidden_states, tuple):
hidden_states = tuple(
cast(torch.FloatTensor, v * masks) for v in hidden_states
)
else:
hidden_states = cast(torch.FloatTensor, masks * hidden_states)
else:
if isinstance(hidden_states, tuple):
# noinspection PyTypeChecker
hidden_states = tuple(
v * masks # type:ignore
+ (1.0 - masks) * (self.init_hidden_state.repeat(1, v.shape[1], 1)) # type: ignore
for v in hidden_states # type:ignore
) # type: ignore
else:
# noinspection PyTypeChecker
hidden_states = masks * hidden_states + (1 - masks) * ( # type: ignore
self.init_hidden_state.repeat(1, hidden_states.shape[1], 1)
)
return hidden_states
def single_forward(
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]
]:
"""Forward for a single-step input."""
(
x,
hidden_states,
masks,
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
) = self.adapt_input(x, hidden_states, masks)
unpacked_hidden_states = self._unpack_hidden(hidden_states)
x, unpacked_hidden_states = self.rnn(
x,
self._mask_hidden(
unpacked_hidden_states, cast(torch.FloatTensor, masks[0].view(1, -1, 1))
),
)
return self.adapt_result(
x,
self._pack_hidden(unpacked_hidden_states),
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
)
def adapt_input(
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor,
torch.FloatTensor,
torch.FloatTensor,
bool,
bool,
int,
int,
int,
]:
nsteps, nsamplers = masks.shape[:2]
assert len(hidden_states.shape) in [
3,
4,
], "hidden_states must be [layer, sampler, hidden] or [layer, sampler, agent, hidden]"
assert len(x.shape) in [
3,
4,
], "observations must be [step, sampler, data] or [step, sampler, agent, data]"
nagents = 1
mem_agent: bool
if len(hidden_states.shape) == 4: # [layer, sampler, agent, hidden]
mem_agent = True
nagents = hidden_states.shape[2]
else: # [layer, sampler, hidden]
mem_agent = False
obs_agent: bool
if len(x.shape) == 4: # [step, sampler, agent, dims]
obs_agent = True
else: # [step, sampler, dims]
obs_agent = False
# Flatten (nsamplers, nagents)
x = x.view(nsteps, nsamplers * nagents, -1) # type:ignore
masks = masks.expand(-1, -1, nagents).reshape( # type:ignore
nsteps, nsamplers * nagents
)
# Flatten (nsamplers, nagents) and remove step dim
hidden_states = hidden_states.view( # type:ignore
self.num_recurrent_layers, nsamplers * nagents, -1
)
# noinspection PyTypeChecker
return x, hidden_states, masks, mem_agent, obs_agent, nsteps, nsamplers, nagents
def adapt_result(
self,
outputs: torch.FloatTensor,
hidden_states: torch.FloatTensor,
mem_agent: bool,
obs_agent: bool,
nsteps: int,
nsamplers: int,
nagents: int,
) -> Tuple[
torch.FloatTensor, torch.FloatTensor,
]:
output_dims = (nsteps, nsamplers) + ((nagents, -1) if obs_agent else (-1,))
hidden_dims = (self.num_recurrent_layers, nsamplers) + (
(nagents, -1) if mem_agent else (-1,)
)
outputs = cast(torch.FloatTensor, outputs.view(*output_dims))
hidden_states = cast(torch.FloatTensor, hidden_states.view(*hidden_dims),)
return outputs, hidden_states
def seq_forward( # type: ignore
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]
]:
"""Forward for a sequence of length T.
# Parameters
x : (Steps, Samplers, Agents, -1) tensor.
hidden_states : The starting hidden states.
masks : A (Steps, Samplers, Agents) tensor.
The masks to be applied to hidden state at every timestep, equal to 0 whenever the previous step finalized
the task, 1 elsewhere.
"""
(
x,
hidden_states,
masks,
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
) = self.adapt_input(x, hidden_states, masks)
# steps in sequence which have zero for any episode. Assume t=0 has
# a zero in it.
has_zeros = (masks[1:] == 0.0).any(dim=-1).nonzero().squeeze().cpu()
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# handle scalar
has_zeros = [has_zeros.item() + 1] # type: ignore
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = cast(List[int], [0] + has_zeros + [nsteps])
unpacked_hidden_states = self._unpack_hidden(
cast(torch.FloatTensor, hidden_states)
)
outputs = []
for i in range(len(has_zeros) - 1):
# process steps that don't have any zeros in masks together
start_idx = int(has_zeros[i])
end_idx = int(has_zeros[i + 1])
# noinspection PyTypeChecker
rnn_scores, unpacked_hidden_states = self.rnn(
x[start_idx:end_idx],
self._mask_hidden(
unpacked_hidden_states,
cast(torch.FloatTensor, masks[start_idx].view(1, -1, 1)),
),
)
outputs.append(rnn_scores)
return self.adapt_result(
cast(torch.FloatTensor, torch.cat(outputs, dim=0)),
self._pack_hidden(unpacked_hidden_states),
mem_agent,
obs_agent,
nsteps,
nsamplers,
nagents,
)
def forward( # type: ignore
self,
x: torch.FloatTensor,
hidden_states: torch.FloatTensor,
masks: torch.FloatTensor,
) -> Tuple[
torch.FloatTensor, Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]
]:
nsteps = masks.shape[0]
if nsteps == 1:
return self.single_forward(x, hidden_states, masks)
return self.seq_forward(x, hidden_states, masks)
class LinearActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
):
super().__init__(action_space=action_space, observation_space=observation_space)
assert (
input_uuid in observation_space.spaces
), "LinearActorCritic expects only a single observational input."
self.input_uuid = input_uuid
box_space: gym.spaces.Box = observation_space[self.input_uuid]
assert isinstance(box_space, gym.spaces.Box), (
"LinearActorCritic requires that"
"observation space corresponding to the input uuid is a Box space."
)
assert len(box_space.shape) == 1
self.in_dim = box_space.shape[0]
self.linear = nn.Linear(self.in_dim, action_space.n + 1)
nn.init.orthogonal_(self.linear.weight)
nn.init.constant_(self.linear.bias, 0)
# noinspection PyMethodMayBeStatic
def _recurrent_memory_specification(self):
return None
def forward(self, observations, memory, prev_actions, masks):
out = self.linear(observations[self.input_uuid])
# noinspection PyArgumentList
return (
ActorCriticOutput(
# ensure [steps, samplers, ...]
distributions=CategoricalDistr(logits=out[..., :-1]),
# ensure [steps, samplers, flattened]
values=cast(torch.FloatTensor, out[..., -1:].view(*out.shape[:2], -1)),
extras={},
),
None,
)
class RNNActorCritic(ActorCriticModel[Distr]):
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
hidden_size: int = 128,
num_layers: int = 1,
rnn_type: str = "GRU",
head_type: Callable[..., ActorCriticModel[Distr]] = LinearActorCritic,
):
super().__init__(action_space=action_space, observation_space=observation_space)
self.hidden_size = hidden_size
self.rnn_type = rnn_type
assert (
input_uuid in observation_space.spaces
), "LinearActorCritic expects only a single observational input."
self.input_uuid = input_uuid
box_space: gym.spaces.Box = observation_space[self.input_uuid]
assert isinstance(box_space, gym.spaces.Box), (
"RNNActorCritic requires that"
"observation space corresponding to the input uuid is a Box space."
)
assert len(box_space.shape) == 1
self.in_dim = box_space.shape[0]
self.state_encoder = RNNStateEncoder(
input_size=self.in_dim,
hidden_size=hidden_size,
num_layers=num_layers,
rnn_type=rnn_type,
trainable_masked_hidden_state=True,
)
self.head_uuid = "{}_{}".format("rnn", input_uuid)
self.ac_nonrecurrent_head: ActorCriticModel[Distr] = head_type(
input_uuid=self.head_uuid,
action_space=action_space,
observation_space=SpaceDict(
{
self.head_uuid: gym.spaces.Box(
low=np.float32(0.0), high=np.float32(1.0), shape=(hidden_size,)
)
}
),
)
self.memory_key = "rnn"
@property
def recurrent_hidden_state_size(self) -> int:
return self.hidden_size
@property
def num_recurrent_layers(self) -> int:
return self.state_encoder.num_recurrent_layers
def _recurrent_memory_specification(self):
return {
self.memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
}
def forward( # type:ignore
self,
observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]],
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
if self.memory_key not in memory:
get_logger().warning(
f"Key {self.memory_key} not found in memory,"
f" initializing this as all zeros."
)
obs = observations[self.input_uuid]
memory.check_append(
key=self.memory_key,
tensor=obs.new(
self.num_recurrent_layers,
obs.shape[1],
self.recurrent_hidden_state_size,
)
.float()
.zero_(),
sampler_dim=1,
)
rnn_out, mem_return = self.state_encoder(
x=observations[self.input_uuid],
hidden_states=memory.tensor(self.memory_key),
masks=masks,
)
# noinspection PyCallingNonCallable
out, _ = self.ac_nonrecurrent_head(
observations={self.head_uuid: rnn_out},
memory=None,
prev_actions=prev_actions,
masks=masks,
)
# noinspection PyArgumentList
return (
out,
memory.set_tensor(self.memory_key, mem_return),
)
| 34.031039 | 118 | 0.574057 |
2c97595cd63b22cbd2c65a7cf8fe0b70a8b285f6 | 2,065 | py | Python | appengine/findit/waterfall/trigger_flake_swarming_task_pipeline.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | 1 | 2018-01-02T05:47:07.000Z | 2018-01-02T05:47:07.000Z | appengine/findit/waterfall/trigger_flake_swarming_task_pipeline.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/waterfall/trigger_flake_swarming_task_pipeline.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from model.flake.flake_swarming_task import FlakeSwarmingTask
from waterfall import monitoring
from waterfall import waterfall_config
from waterfall.trigger_base_swarming_task_pipeline import(
TriggerBaseSwarmingTaskPipeline)
class TriggerFlakeSwarmingTaskPipeline(TriggerBaseSwarmingTaskPipeline):
"""A pipeline to check if selected tests of a step are flaky.
This pipeline only supports test steps that run on Swarming and support the
gtest filter.
"""
def _GetArgs(self, master_name, builder_name, build_number, step_name, tests):
test_name = tests[0] # Only one test per pipeline.
return (master_name, builder_name, build_number, step_name, test_name)
# pylint: disable=arguments-differ
def _GetSwarmingTask(self, master_name, builder_name, build_number,
step_name, test_name):
# Get the appropriate kind of Swarming Task (Flake).
swarming_task = FlakeSwarmingTask.Get(
master_name, builder_name, build_number, step_name, test_name)
return swarming_task
# pylint: disable=arguments-differ
def _CreateSwarmingTask(self, master_name, builder_name, build_number,
step_name, test_name):
# Create the appropriate kind of Swarming Task (Flake).
swarming_task = FlakeSwarmingTask.Create(
master_name, builder_name, build_number, step_name, test_name)
return swarming_task
def _GetIterationsToRerun(self):
flake_settings = waterfall_config.GetCheckFlakeSettings()
swarming_rerun_settings = flake_settings.get('swarming_rerun', {})
return swarming_rerun_settings.get('iterations_to_rerun', 100)
def _OnTaskTriggered(self): # pragma: no cover.
monitoring.swarming_tasks.increment(
{'operation': 'trigger', 'category': 'identify-regression-range'})
def _GetAdditionalTags(self): # pragma: no cover.
return ['purpose:identify-regression-range']
| 41.3 | 80 | 0.756416 |
92de9036cc98cc4354769f78c959bacc51cae385 | 14,762 | py | Python | scipy/signal/__init__.py | liviofetahu/scipy | 96430c5f0eb0a2724c61b189025f3ebed7fec01f | [
"BSD-3-Clause"
] | 1 | 2021-05-03T06:55:01.000Z | 2021-05-03T06:55:01.000Z | scipy/signal/__init__.py | liviofetahu/scipy | 96430c5f0eb0a2724c61b189025f3ebed7fec01f | [
"BSD-3-Clause"
] | null | null | null | scipy/signal/__init__.py | liviofetahu/scipy | 96430c5f0eb0a2724c61b189025f3ebed7fec01f | [
"BSD-3-Clause"
] | null | null | null | """
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-D convolution.
correlate -- N-D correlation.
fftconvolve -- N-D convolution using the FFT.
oaconvolve -- N-D convolution using the overlap-add method.
convolve2d -- 2-D convolution (more options).
correlate2d -- 2-D correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
correlation_lags -- Determines lag indices for 1D cross-correlation.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-D order filter.
medfilt -- N-D median filter.
medfilt2d -- 2-D median filter (faster).
wiener -- N-D Wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-D FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-D deconvolution using lfilter.
sosfilt -- 1-D IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
bilinear_zpk -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response from TF coefficients.
freqs_zpk -- Analog filter frequency response from ZPK coefficients.
freqz -- Digital filter frequency response from TF coefficients.
freqz_zpk -- Digital filter frequency response from ZPK coefficients.
sosfreqz -- Digital filter frequency response for SOS format filter.
gammatone -- FIR and IIR gammatone filter design.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients.
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
iircomb -- Design IIR comb filter.
Continuous-time linear systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- Continuous-time simulation of output to linear system.
lsim2 -- Like lsim, but `scipy.integrate.odeint` is used.
impulse -- Impulse response of linear, time-invariant (LTI) system.
impulse2 -- Like impulse, but `scipy.integrate.odeint` is used.
step -- Step response of continuous-time LTI system.
step2 -- Like step, but `scipy.integrate.odeint` is used.
freqresp -- Frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-time linear systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- Simulation of output to a discrete-time linear system.
dimpulse -- Impulse response of a discrete-time LTI system.
dstep -- Step response of a discrete-time LTI system.
dfreqresp -- Frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- Transfer function to zero-pole-gain.
tf2sos -- Transfer function to second-order sections.
tf2ss -- Transfer function to state-space.
zpk2tf -- Zero-pole-gain to transfer function.
zpk2sos -- Zero-pole-gain to second-order sections.
zpk2ss -- Zero-pole-gain to state-space.
ss2tf -- State-space to transfer function.
ss2zpk -- State-space to pole-zero-gain.
sos2zpk -- Second-order sections to zero-pole-gain.
sos2tf -- Second-order sections to transfer function.
cont2discrete -- Continuous-time to discrete-time LTI conversion.
place_poles -- Pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid.
max_len_seq -- Maximum length sequence.
sawtooth -- Periodic sawtooth.
square -- Square wave.
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial.
unit_impulse -- Discrete unit impulse.
Window functions
================
For window functions, see the `scipy.signal.windows` namespace.
In the `scipy.signal` namespace, there is a convenience function to
obtain these windows by name:
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- Compute scaling function and wavelet from coefficients.
daub -- Return low-pass.
morlet -- Complex Morlet wavelet.
qmf -- Return quadrature mirror filter from low-pass.
ricker -- Return ricker wavelet.
morlet2 -- Return Morlet wavelet, compatible with cwt.
cwt -- Perform continuous wavelet transform.
Peak finding
============
.. autosummary::
:toctree: generated/
argrelmin -- Calculate the relative minima of data.
argrelmax -- Calculate the relative maxima of data.
argrelextrema -- Calculate the relative extrema of data.
find_peaks -- Find a subset of peaks inside a signal.
find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation.
peak_prominences -- Calculate the prominence of each peak in a signal.
peak_widths -- Calculate the width of each peak in a signal.
Spectral analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram.
welch -- Compute a periodogram using Welch's method.
csd -- Compute the cross spectral density, using Welch's method.
coherence -- Compute the magnitude squared coherence, using Welch's method.
spectrogram -- Compute the spectrogram.
lombscargle -- Computes the Lomb-Scargle periodogram.
vectorstrength -- Computes the vector strength.
stft -- Compute the Short Time Fourier Transform.
istft -- Compute the Inverse Short Time Fourier Transform.
check_COLA -- Check the COLA constraint for iSTFT reconstruction.
check_NOLA -- Check the NOLA constraint for iSTFT reconstruction.
"""
from . import _sigtools, windows
from ._waveforms import *
from ._max_len_seq import max_len_seq
from ._upfirdn import upfirdn
from ._spline import ( # noqa: F401
cspline2d,
qspline2d,
sepfir2d,
symiirorder1,
symiirorder2,
)
from ._bsplines import *
from ._filter_design import *
from ._fir_filter_design import *
from ._ltisys import *
from ._lti_conversion import *
from ._signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from ._spectral_py import *
from ._wavelets import *
from ._peak_finding import *
from .windows import get_window # keep this one in signal namespace
# deal with * -> windows.* doc-only soft-deprecation
# Window functions kept importable from scipy.signal purely for backward
# compatibility; their canonical home is scipy.signal.windows.
deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman',
                      'nuttall', 'blackmanharris', 'flattop', 'bartlett',
                      'barthann', 'hamming', 'kaiser', 'gaussian',
                      'general_gaussian', 'chebwin', 'cosine',
                      'hann', 'exponential', 'tukey')
# backward compatibility imports for actually deprecated windows not
# in the above list
from .windows import hanning
def deco(name):
    """Return a wrapper around ``scipy.signal.windows.<name>`` whose
    docstring carries a deprecation warning.

    The wrapper forwards all arguments unchanged; only the function
    metadata (__name__, __module__, __qualname__, __doc__) is rewritten
    so the function appears to live in scipy.signal.
    """
    f = getattr(windows, name)
    # Add deprecation to docstring
    def wrapped(*args, **kwargs):
        return f(*args, **kwargs)
    wrapped.__name__ = name
    wrapped.__module__ = 'scipy.signal'
    if hasattr(f, '__qualname__'):
        wrapped.__qualname__ = f.__qualname__
    if f.__doc__:
        lines = f.__doc__.splitlines()
        # Find the "Parameters" section so the warning can be inserted
        # immediately before it; a missing section is a malformed docstring.
        for li, line in enumerate(lines):
            if line.strip() == 'Parameters':
                break
        else:
            raise RuntimeError('dev error: badly formatted doc')
        # Indent the warning to match the "Parameters" heading.
        spacing = ' ' * line.find('P')
        lines.insert(li, ('{0}.. warning:: scipy.signal.{1} is deprecated,\n'
                          '{0} use scipy.signal.windows.{1} '
                          'instead.\n'.format(spacing, name)))
        wrapped.__doc__ = '\n'.join(lines)
    return wrapped
# Rebind each soft-deprecated window name in this module's namespace to a
# wrapper whose docstring warns about the deprecation.  locals() is the
# module dict here because this runs at module scope.
for name in deprecated_windows:
    locals()[name] = deco(name)
del deprecated_windows, name, deco
# Export every public name defined or imported above.
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 40.00542 | 83 | 0.656076 |
3816235d35bd68470c789df5b9c5f123d0d874b0 | 8,637 | py | Python | code/client/munkilib/dmgutils.py | grahamgilbert/munki | 9ddc5a063a92b7f7671bddd679db3fbe7cb860b6 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/dmgutils.py | grahamgilbert/munki | 9ddc5a063a92b7f7671bddd679db3fbe7cb860b6 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/dmgutils.py | grahamgilbert/munki | 9ddc5a063a92b7f7671bddd679db3fbe7cb860b6 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
#
# Copyright 2009-2019 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dmgutils.py
Created by Greg Neagle on 2016-12-13.
Utilities for working with disk images.
"""
from __future__ import absolute_import, print_function
import os
import subprocess
from . import display
from . import utils
from .wrappers import readPlistFromString, PlistReadError
# we use lots of camelCase-style names. Deal with it.
# pylint: disable=C0103
# dmg helpers
def DMGisWritable(dmgpath):
    '''Attempts to determine if the given disk image is writable'''
    # Ask hdiutil for the image metadata as a plist.
    job = subprocess.Popen(
        ['/usr/bin/hdiutil', 'imageinfo', dmgpath, '-plist'],
        bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = job.communicate()
    if stderr:
        display.display_error(
            u'hdiutil error %s with image %s.', stderr, dmgpath)
    pliststr, stdout = utils.getFirstPlist(stdout)
    if not pliststr:
        return False
    try:
        info = readPlistFromString(pliststr)
    except PlistReadError:
        return False
    # Sparse bundle, sparse image and read/write formats are writable.
    return info.get('Format') in ('UDSB', 'UDSP', 'UDRW', 'RdWr')
def dmg_has_sla(dmgpath):
    '''Returns true if dmg has a Software License Agreement.
    These dmgs normally cannot be attached without user intervention'''
    job = subprocess.Popen(
        ['/usr/bin/hdiutil', 'imageinfo', dmgpath, '-plist'],
        bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = job.communicate()
    if stderr:
        display.display_error(
            u'hdiutil error %s with image %s.', stderr, dmgpath)
    pliststr, stdout = utils.getFirstPlist(stdout)
    if not pliststr:
        return False
    try:
        info = readPlistFromString(pliststr)
    except PlistReadError:
        return False
    properties = info.get('Properties')
    if not properties:
        return False
    # Absent key means no license agreement embedded in the image.
    return properties.get('Software License Agreement', False)
def hdiutil_info():
    """
    Convenience method for running 'hdiutil info -plist'
    Returns the root object parsed with readPlistFromString()
    """
    job = subprocess.Popen(
        ['/usr/bin/hdiutil', 'info', '-plist'],
        bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = job.communicate()
    if stderr:
        display.display_error(u'hdiutil info error: %s', stderr.decode("UTF-8"))
    pliststr, stdout = utils.getFirstPlist(stdout)
    if pliststr:
        try:
            return readPlistFromString(pliststr)
        except PlistReadError:
            pass
    # No parseable plist in hdiutil's output.
    return None
def diskImageIsMounted(dmgpath):
    """
    Returns true if the given disk image is currently mounted
    """
    infoplist = hdiutil_info()
    if not infoplist:
        # hdiutil_info() returns None when its output is empty or
        # unparseable; previously that crashed with an AttributeError.
        return False
    # Guard with a default: a missing 'images' key previously raised a
    # TypeError when iterating None.
    for imageProperties in infoplist.get('images', []):
        if imageProperties.get('image-path') == dmgpath:
            for entity in imageProperties.get('system-entities', []):
                if entity.get('mount-point'):
                    # At least one volume from this image is mounted.
                    return True
    return False
def pathIsVolumeMountPoint(path):
    """
    Checks if the given path is a volume for an attached disk image
    Returns true if the given path is a mount point or false if it isn't
    """
    infoplist = hdiutil_info()
    if not infoplist:
        # hdiutil_info() returns None on empty/unparseable output;
        # previously that crashed with an AttributeError.
        return False
    for imageProperties in infoplist.get('images', []):
        if 'image-path' in imageProperties:
            for entity in imageProperties.get('system-entities', []):
                if entity.get('mount-point') == path:
                    return True
    return False
def diskImageForMountPoint(path):
    """
    Resolves the given mount point path to an attached disk image path
    Returns a path to a disk image file or None if the path is not
    a valid mount point
    """
    infoplist = hdiutil_info()
    if not infoplist:
        # hdiutil_info() returns None on empty/unparseable output;
        # previously that crashed with an AttributeError.
        return None
    dmgpath = None
    for imageProperties in infoplist.get('images', []):
        if 'image-path' in imageProperties:
            imagepath = imageProperties['image-path']
            for entity in imageProperties.get('system-entities', []):
                if 'mount-point' in entity:
                    # NOTE: os.path.samefile raises OSError if either path
                    # does not exist (behavior preserved from the original).
                    if os.path.samefile(path, entity['mount-point']):
                        dmgpath = imagepath
    return dmgpath
def mount_points_for_disk_image(dmgpath):
    """
    Returns a list of mountpoints for the given disk image
    """
    mountpoints = []
    infoplist = hdiutil_info()
    if not infoplist:
        # hdiutil_info() returns None on empty/unparseable output;
        # previously that crashed with an AttributeError.
        return mountpoints
    for imageProperties in infoplist.get('images', []):
        if imageProperties.get('image-path') == dmgpath:
            for entity in imageProperties.get('system-entities', []):
                if 'mount-point' in entity:
                    mountpoints.append(entity['mount-point'])
            # Only the first matching image entry is considered.
            break
    return mountpoints
def mountdmg(dmgpath, use_shadow=False, use_existing_mounts=False,
             random_mountpoint=True):
    """
    Attempts to mount the dmg at dmgpath
    and returns a list of mountpoints
    If use_shadow is true, mount image with shadow file
    If random_mountpoint, mount at random dir under /tmp
    """
    mountpoints = []
    dmgname = os.path.basename(dmgpath)
    if use_existing_mounts:
        # Check if this dmg is already mounted
        # and if so, bail out and return the mountpoints
        if diskImageIsMounted(dmgpath):
            mountpoints = mount_points_for_disk_image(dmgpath)
            return mountpoints
    # Attempt to mount the dmg
    stdin = b''
    if dmg_has_sla(dmgpath):
        # Auto-accept the embedded license agreement by answering 'Y'
        # on hdiutil's stdin; otherwise attach would block for input.
        stdin = b'Y\n'
        display.display_detail(
            'NOTE: %s has embedded Software License Agreement' % dmgname)
    cmd = ['/usr/bin/hdiutil', 'attach', dmgpath, '-nobrowse', '-plist']
    if random_mountpoint:
        cmd.extend(['-mountRandom', '/tmp'])
    if use_shadow:
        cmd.append('-shadow')
    proc = subprocess.Popen(cmd,
                            bufsize=-1, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (out, err) = proc.communicate(stdin)
    if proc.returncode:
        display.display_error(
            u'Error: "%s" while mounting %s.'
            % (err.decode('UTF-8').rstrip(), dmgname))
    (pliststr, out) = utils.getFirstPlist(out)
    if pliststr:
        try:
            plist = readPlistFromString(pliststr)
            # Collect every volume mount point hdiutil reports.
            for entity in plist.get('system-entities', []):
                if 'mount-point' in entity:
                    mountpoints.append(entity['mount-point'])
        except PlistReadError as err:
            display.display_error("%s" % err)
            display.display_error(
                'Bad plist string returned when mounting diskimage %s:\n%s'
                % (dmgname, pliststr))
    return mountpoints
def unmountdmg(mountpoint):
    """
    Unmounts the dmg at mountpoint

    Tries a polite detach first; on failure retries with -force.
    """
    cmd = ['/usr/bin/hdiutil', 'detach', mountpoint]
    proc = subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (dummy_output, err) = proc.communicate()
    if proc.returncode:
        # ordinary unmount unsuccessful, try forcing
        # FIX: decode stderr so the warning doesn't show a bytes repr,
        # matching how err is decoded elsewhere in this module.
        display.display_warning(
            'Polite unmount failed: %s' % err.decode("UTF-8"))
        display.display_warning('Attempting to force unmount %s' % mountpoint)
        cmd.append('-force')
        proc = subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        (dummy_output, err) = proc.communicate()
        if proc.returncode:
            display.display_warning(
                'Failed to unmount %s: %s', mountpoint, err.decode("UTF-8"))
if __name__ == '__main__':
    # This module is a library; running it directly just prints a notice.
    print('This is a library of support tools for the Munki Suite.')
| 34.003937 | 78 | 0.619081 |
b00f513a0a62c9a2e224ae6fef181574a870c2a7 | 5,560 | py | Python | scripts/triggers_vm_generator.py | nlamirault/click-to-deploy | c82366a184e0293e02253adadae736bb7ff28c9c | [
"Apache-2.0"
] | 616 | 2018-07-18T16:16:02.000Z | 2022-03-11T07:52:07.000Z | scripts/triggers_vm_generator.py | nlamirault/click-to-deploy | c82366a184e0293e02253adadae736bb7ff28c9c | [
"Apache-2.0"
] | 965 | 2018-07-18T13:04:38.000Z | 2022-03-31T17:20:18.000Z | scripts/triggers_vm_generator.py | nlamirault/click-to-deploy | c82366a184e0293e02253adadae736bb7ff28c9c | [
"Apache-2.0"
] | 428 | 2018-07-20T14:35:17.000Z | 2022-03-29T07:02:50.000Z | #!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import multiprocessing.pool
import os
import subprocess
import sys
import yaml
CLOUDBUILD_CONFIG_FILE = 'cloudbuild-vm.yaml'
COOKBOOKS_DIR = 'vm/chef/cookbooks'
PACKER_DIR = 'vm/packer/templates'
TESTS_DIR = 'vm/tests/solutions/spec'
_COOKBOOKS = {}
class VmTriggerConfig(object):
    """Generates GCB trigger for VM solution."""

    def __init__(self, solution, knife_binary):
        """Args:
            solution: name of the VM solution (a directory under PACKER_DIR).
            knife_binary: knife-solo binary used for cookbook dep lookup.
        """
        self._solution = solution
        self._knife_binary = knife_binary

    @property
    def packer_run_list(self):
        """Returns Chef's run_list from Packer's template."""
        with open(os.path.join(self.packer_dir, 'packer.in.json')) as json_file:
            data = json.load(json_file)
        run_list = data['chef']['run_list']
        # Entries look like 'cookbook::recipe'; keep only the cookbook part.
        return [cookbook.split('::', 1)[0] for cookbook in run_list]

    @property
    def should_include_test(self):
        """Returns whether solution has tests."""
        return True

    @property
    def packer_dir(self):
        """Returns path to the Packer's template directory."""
        return os.path.join(PACKER_DIR, self._solution)

    @property
    def tests_dir(self):
        """Returns path to the tests directory."""
        return os.path.join(TESTS_DIR, self._solution)

    @property
    def included_files(self):
        """Returns list of included files (build trigger file globs)."""
        included_files = [
            os.path.join(self.packer_dir, '**'), CLOUDBUILD_CONFIG_FILE
        ]
        if self.should_include_test:
            included_files.append(os.path.join(self.tests_dir, '**'))
        # One glob per cookbook the solution (transitively) depends on.
        for cookbook in self.packer_run_list:
            included_files.extend([
                os.path.join(COOKBOOKS_DIR, dep, '**') for dep in get_cookbook_deps(
                    cookbook=cookbook, knife_binary=self._knife_binary)
            ])
        return self._remove_duplicates(included_files)

    def _remove_duplicates(self, included_files):
        """Removes duplicates from a List, preserving first-seen order."""
        # Track seen items in a set: O(n) instead of the previous O(n^2)
        # list-membership scan.
        seen = set()
        final_list = []
        for item in included_files:
            if item not in seen:
                seen.add(item)
                final_list.append(item)
        return final_list

    def generate_config(self, included_files):
        """Generates GCB trigger config."""
        included_files.sort()
        trigger = {
            'description': 'Trigger for VM %s' % self._solution,
            'filename': CLOUDBUILD_CONFIG_FILE,
            'github': {
                'name': 'click-to-deploy',
                'owner': 'GoogleCloudPlatform',
                'pullRequest': {
                    'branch': '.*',
                    'commentControl': 'COMMENTS_ENABLED'
                }
            },
            'includedFiles': included_files,
            'substitutions': {
                '_SOLUTION_NAME': self._solution
            }
        }
        return trigger
class CreateThreadPoolAndWait(object):
    """Context manager owning a thread pool; on exit, waits for all jobs.

    For example:
        with CreateThreadPoolAndWait() as pool:
            result1 = pool.apply_async(func1)
            result2 = pool.apply_async(func2)
    """

    def __init__(self):
        self._thread_pool = multiprocessing.pool.ThreadPool()

    def __enter__(self):
        return self._thread_pool

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop accepting new work, then block until everything finishes.
        self._thread_pool.close()
        self._thread_pool.join()
def invoke_shell(args):
    """Invokes a shell command."""
    logging.debug('Executing command: %s', args)
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _unused_stderr = process.communicate()
    # Return decoded stdout together with the process exit code.
    return stdout.decode('utf-8'), process.returncode
def get_cookbook_deps(cookbook, knife_binary):
    """Returns cookbooks dependencies.

    Results are memoized in the module-level _COOKBOOKS dict, so each
    cookbook is resolved via knife at most once per run.
    """
    if cookbook in _COOKBOOKS:
        # do not check cookbook twice
        return _COOKBOOKS[cookbook]
    command = [
        knife_binary, 'deps', '--config-option',
        'cookbook_path=%s' % COOKBOOKS_DIR,
        os.path.join('/cookbooks', cookbook)
    ]
    deps, exit_code = invoke_shell(command)
    # NOTE(review): assert is stripped under `python -O`; a knife failure
    # would then go unnoticed -- consider raising an explicit error.
    assert exit_code == 0, exit_code
    # knife prints one '/cookbooks/<name>' path per line; strip the prefix.
    deps = [dep.replace('/cookbooks/', '') for dep in deps.splitlines()]
    _COOKBOOKS[cookbook] = deps
    return deps
def get_solutions_list():
    """Returns list of solutions."""
    # Each sub-directory of PACKER_DIR is one solution; return them sorted.
    return sorted(
        entry for entry in os.listdir(PACKER_DIR)
        if os.path.isdir(os.path.join(PACKER_DIR, entry))
    )
def generate_config(solution, knife_binary):
    """Builds the GCB trigger config dict for a single solution."""
    trigger = VmTriggerConfig(solution=solution, knife_binary=knife_binary)
    included_files = trigger.included_files
    return trigger.generate_config(included_files)
def main():
    """Generates trigger configs for every solution and prints them as YAML."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--knife_binary', type=str, default='knife', help='knife-solo binary')
    args = parser.parse_args()
    listdir = get_solutions_list()
    # Resolve all solutions concurrently; knife invocations are
    # subprocess-bound, so threads overlap the waiting.
    with CreateThreadPoolAndWait() as pool:
        triggers_results = [
            pool.apply_async(generate_config, (solution, args.knife_binary))
            for solution in listdir
        ]
        triggers = [result.get() for result in triggers_results]
    print(yaml.dump_all(triggers, default_flow_style=False))
if __name__ == '__main__':
    # Log to stderr so stdout stays pure YAML for redirection.
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    os.sys.exit(main())
| 27.8 | 80 | 0.691187 |
25e4791ae93316d61cdd7ffce63179d379523966 | 676 | py | Python | t_speech_assisstant/venv/lib/python3.7/site-packages/OSAKit/__init__.py | TEgo17/SpeechAssistant | 0e16dd4757f1b05f08ecbb4f4956b2ccd1a63c10 | [
"MIT"
] | 1 | 2020-09-01T16:24:50.000Z | 2020-09-01T16:24:50.000Z | t_speech_assisstant/venv/lib/python3.7/site-packages/OSAKit/__init__.py | TEgo17/SpeechAssistant | 0e16dd4757f1b05f08ecbb4f4956b2ccd1a63c10 | [
"MIT"
] | 1 | 2021-01-28T20:40:19.000Z | 2021-01-28T20:40:19.000Z | t_speech_assisstant/venv/lib/python3.7/site-packages/OSAKit/__init__.py | TEgo17/SpeechAssistant | 0e16dd4757f1b05f08ecbb4f4956b2ccd1a63c10 | [
"MIT"
] | null | null | null | """
Python mapping for the OSAKit framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import objc
import sys
import Cocoa
from OSAKit import _metadata
# Replace this module in sys.modules with a lazy proxy that resolves the
# OSAKit framework's symbols (described by _metadata) on first access.
sys.modules["OSAKit"] = mod = objc.ObjCLazyModule(
    "OSAKit",
    "com.apple.OSAKit",
    objc.pathForFramework("/System/Library/Frameworks/OSAKit.framework"),
    _metadata.__dict__,
    None,
    {
        "__doc__": __doc__,
        "objc": objc,
        "__path__": __path__,
        "__loader__": globals().get("__loader__", None),
    },
    (Cocoa,),  # parent framework(s) whose namespace is also exposed
)
# NOTE: `sys` was already imported above; this re-import is redundant but
# harmless (kept as generated by the PyObjC tooling).
import sys
# Drop the raw metadata module so only the lazy proxy stays importable.
del sys.modules["OSAKit._metadata"]
| 20.484848 | 75 | 0.677515 |
2d9528bab32c0b0fbe48f02b7812cf5bc7e16b90 | 2,488 | py | Python | tools/local_speedy.py | 7erry/iot | 90ee5be1e5ba594ac202659e1cddfb755c333ea1 | [
"Apache-2.0"
] | null | null | null | tools/local_speedy.py | 7erry/iot | 90ee5be1e5ba594ac202659e1cddfb755c333ea1 | [
"Apache-2.0"
] | null | null | null | tools/local_speedy.py | 7erry/iot | 90ee5be1e5ba594ac202659e1cddfb755c333ea1 | [
"Apache-2.0"
] | 1 | 2021-12-16T08:44:40.000Z | 2021-12-16T08:44:40.000Z | #!/usr/bin/python
import sys
import math
import urllib
import httplib
import time
import random
# Simulated-device parameters.
id = '123456789012345'     # device identifier (IMEI-like string)
server = 'localhost:5055'  # HTTP endpoint -- presumably a Traccar/OsmAnd
                           # protocol listener; verify against deployment
period = 1                 # seconds between position reports
step = 0.001               # interpolation step between waypoints, degrees
device_speed = 70
driver_id = '123456'

# Closed route: the segment after the last waypoint wraps to the first.
waypoints = [
    (34.079401, -84.221641),
    (34.079396, -84.221554),
    (34.079539, -84.221560),
    (34.079528, -84.221622),
    (34.079501, -84.221679)
]

# Densify the route: linearly interpolate between consecutive waypoints so
# neighboring points are at most `step` degrees apart.
points = []
for i in range(0, len(waypoints)):
    (lat1, lon1) = waypoints[i]
    (lat2, lon2) = waypoints[(i + 1) % len(waypoints)]
    length = math.sqrt((lat2 - lat1) ** 2 + (lon2 - lon1) ** 2)
    count = int(math.ceil(length / step))
    for j in range(0, count):
        lat = lat1 + (lat2 - lat1) * j / count
        lon = lon1 + (lon2 - lon1) * j / count
        points.append((lat, lon))
def send(conn, lat, lon, course, speed, alarm, ignition, accuracy, rpm, fuel, driverUniqueId):
    """Issue one GET position report over the given HTTP connection."""
    # Mandatory position fields first; optional attributes are appended
    # only when they carry a truthy value.
    params = [
        ('id', id), ('timestamp', int(time.time())), ('lat', lat),
        ('lon', lon), ('bearing', course), ('speed', speed)]
    if alarm:
        params.append(('alarm', 'sos'))
    if ignition:
        params.append(('ignition', 'true'))
    if accuracy:
        params.append(('accuracy', accuracy))
    if rpm:
        params.append(('rpm', rpm))
    if fuel:
        params.append(('fuel', fuel))
    if driverUniqueId:
        params.append(('driverUniqueId', driverUniqueId))
    conn.request('GET', '?' + urllib.urlencode(params))
    # Read (and discard) the response so the connection can be reused.
    conn.getresponse().read()
def course(lat1, lon1, lat2, lon2):
    """Initial great-circle bearing from point 1 to point 2, degrees [0, 360)."""
    # Convert all coordinates from degrees to radians.
    phi1 = lat1 * math.pi / 180
    lam1 = lon1 * math.pi / 180
    phi2 = lat2 * math.pi / 180
    lam2 = lon2 * math.pi / 180
    d_lam = lam2 - lam1
    # Standard forward-azimuth formula.
    y = math.sin(d_lam) * math.cos(phi2)
    x = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(d_lam)
    bearing = math.atan2(y, x) % (2 * math.pi)
    return bearing * 180 / math.pi
index = 0
conn = httplib.HTTPConnection(server)
# Walk the interpolated route forever, reporting one position per period.
while True:
    (lat1, lon1) = points[index % len(points)]
    (lat2, lon2) = points[(index + 1) % len(points)]
    # At the route start simulate a stop: zero speed, ignition off, and
    # report the driver id once per lap.
    speed = device_speed if (index % len(points)) != 0 else 0
    alarm = (index % 10) == 0
    ignition = (index % len(points)) != 0
    accuracy = 100 if (index % 10) == 0 else 0
    rpm = random.randint(500, 4000)
    fuel = random.randint(0, 80)
    driverUniqueId = driver_id if (index % len(points)) == 0 else False
    send(conn, lat1, lon1, course(lat1, lon1, lat2, lon2), speed, alarm, ignition, accuracy, rpm, fuel, driverUniqueId)
    time.sleep(period)
    index += 1
| 31.1 | 125 | 0.590434 |
f2c9f41525ce59a6733bb6e5da25e4c45e17fdcf | 576 | py | Python | app/api/events.py | dankolbman/freets-web | d68e65df930f307adf4f00b1ad4ee602393dff93 | [
"MIT"
] | 2 | 2015-02-22T05:00:28.000Z | 2015-03-17T22:41:40.000Z | app/api/events.py | dankolbman/freets-web | d68e65df930f307adf4f00b1ad4ee602393dff93 | [
"MIT"
] | null | null | null | app/api/events.py | dankolbman/freets-web | d68e65df930f307adf4f00b1ad4ee602393dff93 | [
"MIT"
] | 2 | 2015-02-22T15:11:50.000Z | 2015-02-24T19:36:27.000Z | from flask import jsonify, request
from .. import db
from ..models import Event
from . import api
# Get event
@api.route('/event/<int:id>')
def get_event(id):
event = Event.query.get_or_404(id)
# TODO: Implement when the model updates
attendees_count = 1
json_post = event.to_json()
# TODO: Change when new model is available
json_post['num_attendees'] = attendees_count
return jsonify(json_post)
# Returns lim users attending the event
@api.route('/event/<int:id>/attending')
def get_attendees(id, lim=10):
    """Return up to `lim` users attending event `id`.

    FIX: the route only supplies `id`, so `lim` needs a default value --
    without one, every request to this endpoint raised a TypeError.
    """
    # TODO: Implement when new model available
    pass
7b5f0972578c2eb9ea96360ea4aed13d8b89592e | 3,220 | py | Python | Sources/AlphaBot2/Web-Control/AlphaBot.py | maroneal/SmartC | 515502d69832b5acf427715b87f0cc17d10e7987 | [
"BSD-2-Clause"
] | null | null | null | Sources/AlphaBot2/Web-Control/AlphaBot.py | maroneal/SmartC | 515502d69832b5acf427715b87f0cc17d10e7987 | [
"BSD-2-Clause"
] | null | null | null | Sources/AlphaBot2/Web-Control/AlphaBot.py | maroneal/SmartC | 515502d69832b5acf427715b87f0cc17d10e7987 | [
"BSD-2-Clause"
] | 2 | 2019-03-04T08:26:39.000Z | 2019-04-15T09:40:31.000Z | import RPi.GPIO as GPIO
import time
class AlphaBot(object):
    """Driver for a two-motor AlphaBot chassis on a Raspberry Pi.

    Each motor uses two direction pins (IN1/IN2 for channel A, IN3/IN4 for
    channel B) plus a PWM enable pin (ENA/ENB) whose duty cycle sets the
    speed.  Pin numbers are BCM numbers.
    """

    def __init__(self,in1=13,in2=12,ena=6,in3=21,in4=20,enb=26):
        # Direction and enable pins (BCM numbering).
        self.IN1 = in1
        self.IN2 = in2
        self.IN3 = in3
        self.IN4 = in4
        self.ENA = ena
        self.ENB = enb
        # Default duty cycles (percent) for channels A and B.
        self.PA = 50
        self.PB = 50
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.IN1,GPIO.OUT)
        GPIO.setup(self.IN2,GPIO.OUT)
        GPIO.setup(self.IN3,GPIO.OUT)
        GPIO.setup(self.IN4,GPIO.OUT)
        GPIO.setup(self.ENA,GPIO.OUT)
        GPIO.setup(self.ENB,GPIO.OUT)
        # 500 Hz PWM on each enable pin; motors start stopped.
        self.PWMA = GPIO.PWM(self.ENA,500)
        self.PWMB = GPIO.PWM(self.ENB,500)
        self.PWMA.start(self.PA)
        self.PWMB.start(self.PB)
        self.stop()

    def forward(self):
        """Drive both motors forward at the configured duty cycles."""
        self.PWMA.ChangeDutyCycle(self.PA)
        self.PWMB.ChangeDutyCycle(self.PB)
        GPIO.output(self.IN1,GPIO.HIGH)
        GPIO.output(self.IN2,GPIO.LOW)
        GPIO.output(self.IN3,GPIO.HIGH)
        GPIO.output(self.IN4,GPIO.LOW)

    def stop(self):
        """Stop both motors: zero duty cycle, all direction pins low."""
        self.PWMA.ChangeDutyCycle(0)
        self.PWMB.ChangeDutyCycle(0)
        GPIO.output(self.IN1,GPIO.LOW)
        GPIO.output(self.IN2,GPIO.LOW)
        GPIO.output(self.IN3,GPIO.LOW)
        GPIO.output(self.IN4,GPIO.LOW)

    def backward(self):
        """Drive both motors in reverse at the configured duty cycles."""
        self.PWMA.ChangeDutyCycle(self.PA)
        self.PWMB.ChangeDutyCycle(self.PB)
        GPIO.output(self.IN1,GPIO.LOW)
        GPIO.output(self.IN2,GPIO.HIGH)
        GPIO.output(self.IN3,GPIO.LOW)
        GPIO.output(self.IN4,GPIO.HIGH)

    def left(self):
        """Turn by idling channel A (duty 0) while channel B runs forward."""
        self.PWMA.ChangeDutyCycle(0)
        self.PWMB.ChangeDutyCycle(self.PB)
        GPIO.output(self.IN1,GPIO.HIGH)
        GPIO.output(self.IN2,GPIO.LOW)
        GPIO.output(self.IN3,GPIO.HIGH)
        GPIO.output(self.IN4,GPIO.LOW)

    def right(self):
        """Turn by idling channel B (duty 0) while channel A runs forward."""
        self.PWMA.ChangeDutyCycle(self.PA)
        self.PWMB.ChangeDutyCycle(0)
        GPIO.output(self.IN1,GPIO.HIGH)
        GPIO.output(self.IN2,GPIO.LOW)
        GPIO.output(self.IN3,GPIO.HIGH)
        GPIO.output(self.IN4,GPIO.LOW)

    def setPWMA(self,value):
        """Set channel A duty cycle (0-100) and apply it immediately."""
        self.PA = value
        self.PWMA.ChangeDutyCycle(self.PA)

    def setPWMB(self,value):
        """Set channel B duty cycle (0-100) and apply it immediately."""
        self.PB = value
        self.PWMB.ChangeDutyCycle(self.PB)

    def setMotor(self, left, right):
        """Set both motors from signed speeds in [-100, 100].

        Positive drives forward, negative reverse; out-of-range values
        are silently ignored for that channel.
        NOTE(review): `right` drives channel A and `left` drives channel B
        here -- confirm against the physical wiring before relying on the
        parameter names.
        """
        if((right >= 0) and (right <= 100)):
            GPIO.output(self.IN1,GPIO.HIGH)
            GPIO.output(self.IN2,GPIO.LOW)
            self.PWMA.ChangeDutyCycle(right)
        elif((right < 0) and (right >= -100)):
            GPIO.output(self.IN1,GPIO.LOW)
            GPIO.output(self.IN2,GPIO.HIGH)
            self.PWMA.ChangeDutyCycle(0 - right)
        if((left >= 0) and (left <= 100)):
            GPIO.output(self.IN3,GPIO.HIGH)
            GPIO.output(self.IN4,GPIO.LOW)
            self.PWMB.ChangeDutyCycle(left)
        elif((left < 0) and (left >= -100)):
            GPIO.output(self.IN3,GPIO.LOW)
            GPIO.output(self.IN4,GPIO.HIGH)
            self.PWMB.ChangeDutyCycle(0 - left)
if __name__=='__main__':
    # Smoke test: drive forward until interrupted with Ctrl-C, then
    # release the GPIO pins.
    Ab = AlphaBot()
    Ab.forward()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        GPIO.cleanup()
| 30.666667 | 64 | 0.583851 |
8009305111c1e94cba8d895d553d0ba116798d8d | 848 | py | Python | Examples/get_worksheet_pivot_table_filers.py | aspose-cells-cloud/aspose-cells-cloud-python | 0189236d38053dc67f7edc754b5101f17262cee8 | [
"MIT"
] | 3 | 2018-05-23T03:16:26.000Z | 2020-11-07T11:42:41.000Z | Examples/get_worksheet_pivot_table_filers.py | aspose-cells-cloud/aspose-cells-cloud-python | 0189236d38053dc67f7edc754b5101f17262cee8 | [
"MIT"
] | null | null | null | Examples/get_worksheet_pivot_table_filers.py | aspose-cells-cloud/aspose-cells-cloud-python | 0189236d38053dc67f7edc754b5101f17262cee8 | [
"MIT"
] | 4 | 2018-08-29T18:45:05.000Z | 2021-03-25T07:59:56.000Z | import os
import sys
ABSPATH = os.path.abspath(os.path.realpath(os.path.dirname(__file__)) + "/..")
sys.path.append(ABSPATH)
import asposecellscloud
from asposecellscloud.rest import ApiException
from asposecellscloud.apis.cells_api import CellsApi
import AuthUtil
from asposecellscloud.models import CalculationOptions
from asposecellscloud.models import FontSetting
from asposecellscloud.models import Font
from asposecellscloud.models import Style
api_client = AuthUtil.GetApiClient()
api = asposecellscloud.apis.cells_api.CellsApi(api_client)

# Example inputs: workbook, sheet, and pivot table to query.
name = 'TestCase.xlsx'
sheet_name = 'Sheet4'
pivotTableIndex = 0
fieldIndex = 0
folder = "Temp"
AuthUtil.Ready(name, folder)
# FIX: this is module-level script code, so there is no `self`; calling
# `self.api.*` raised a NameError.  Use the CellsApi instance directly.
result = api.cells_pivot_tables_get_worksheet_pivot_table_filters(
    name, sheet_name, pivotTableIndex, folder=folder)
7af39dd9e610bc674c61b393992aa707f3804266 | 879 | py | Python | helper_funcs/chat_base.py | Aanchal101/TG-RENAMER-BOT | af747c5c18fd7632ce201febc638605fe44bcb58 | [
"Apache-2.0"
] | null | null | null | helper_funcs/chat_base.py | Aanchal101/TG-RENAMER-BOT | af747c5c18fd7632ce201febc638605fe44bcb58 | [
"Apache-2.0"
] | 1 | 2022-03-07T10:48:50.000Z | 2022-03-07T10:48:50.000Z | helper_funcs/chat_base.py | Aanchal101/TG-RENAMER-BOT | af747c5c18fd7632ce201febc638605fe44bcb58 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Anonymous
# the logging things
import logging
# Configure root logging once at import; module logger for local use.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import os
# the secret configuration specific things
# Any non-empty WEBHOOK env value (even the string "False") selects the
# sample config; otherwise the local config module is used.
if bool(os.environ.get("WEBHOOK", False)):
    from sample_config import Config
else:
    from config import Config
# the Strings used for this "thing"
from translation import Translation
from pyrogram import Client, Filters
# the Telegram trackings
from chatbase import Message
def TRChatBase(chat_id, message_text, intent):
    """Report a user message to Chatbase analytics.

    Args:
        chat_id: Telegram chat/user identifier, used as the Chatbase user_id.
        message_text: raw message text to log.
        intent: intent label to attribute the message to.

    Returns:
        The Chatbase send() response.  (Previously the response was
        assigned and silently discarded; returning it lets callers check
        whether delivery succeeded, and is backward compatible.)
    """
    msg = Message(api_key=Config.CHAT_BASE_TOKEN,
                  platform="Telegram",
                  version="1.3",
                  user_id=chat_id,
                  message=message_text,
                  intent=intent)
    resp = msg.send()
    return resp
| 24.416667 | 82 | 0.667804 |
ef8fb68dccd499a5dde71d7116bdb5ecaa341765 | 59,623 | py | Python | kitsune/questions/tests/test_templates.py | mattp1488/kitsune | 322287a3dd0dc52ccd54f0cb8e8016246da99a62 | [
"BSD-3-Clause"
] | null | null | null | kitsune/questions/tests/test_templates.py | mattp1488/kitsune | 322287a3dd0dc52ccd54f0cb8e8016246da99a62 | [
"BSD-3-Clause"
] | null | null | null | kitsune/questions/tests/test_templates.py | mattp1488/kitsune | 322287a3dd0dc52ccd54f0cb8e8016246da99a62 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import random
from datetime import datetime, timedelta
from string import letters
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.cache import cache
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from taggit.models import Tag
from tidings.models import Watch
import kitsune.questions.tasks
from kitsune.products.tests import ProductFactory
from kitsune.questions.events import QuestionReplyEvent, QuestionSolvedEvent
from kitsune.questions.models import Question, Answer, VoteMetadata, QuestionLocale
from kitsune.questions.tests import (
TestCaseBase,
tags_eq,
QuestionFactory,
AnswerFactory,
AnswerVoteFactory,
)
from kitsune.questions.views import UNAPPROVED_TAG, NO_TAG
from kitsune.search.tests import ElasticTestCase
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.tests import (
get,
post,
attrs_eq,
emailmessage_raise_smtp,
TestCase,
LocalizingClient,
)
from kitsune.sumo.urlresolvers import reverse
from kitsune.tags.tests import TagFactory
from kitsune.products.tests import TopicFactory
from kitsune.upload.models import ImageAttachment
from kitsune.users.tests import UserFactory, add_permission
from kitsune.wiki.tests import DocumentFactory, ApprovedRevisionFactory
class AnswersTemplateTestCase(TestCaseBase):
    """Test the Answers template.

    Exercises the question-detail page end to end: posting answers
    (with and without image uploads), marking/unmarking solutions,
    question and answer voting, delete/edit/lock permission checks,
    reply/solution watches (email subscriptions), answer previews and
    misc rendering details (nofollow links, robots noindex).
    """
    def setUp(self):
        super(AnswersTemplateTestCase, self).setUp()
        # AnswerFactory creates a question plus one answer on it; log in
        # as an unrelated user so permission checks are meaningful.
        self.user = UserFactory()
        self.client.login(username=self.user.username, password="testpass")
        self.question = AnswerFactory().question
        self.answer = self.question.answers.all()[0]
    def test_answer(self):
        """Posting a valid answer inserts it."""
        num_answers = self.question.answers.count()
        content = "lorem ipsum dolor sit amet"
        response = post(
            self.client,
            "questions.reply",
            {"content": content},
            args=[self.question.id],
        )
        # post() follows redirects; exactly one hop back to the question.
        eq_(1, len(response.redirect_chain))
        eq_(num_answers + 1, self.question.answers.count())
        new_answer = self.question.answers.order_by("-id")[0]
        eq_(content, new_answer.content)
        # Check canonical url
        doc = pq(response.content)
        eq_(
            "%s/en-US/questions/%s" % (settings.CANONICAL_URL, self.question.id),
            doc('link[rel="canonical"]')[0].attrib["href"],
        )
    def test_answer_upload(self):
        """Posting answer attaches an existing uploaded image to the answer."""
        # Upload the image first; it is associated with the user, then
        # attached to whatever answer that user posts next.
        f = open("kitsune/upload/tests/media/test.jpg")
        post(
            self.client,
            "upload.up_image_async",
            {"image": f},
            args=["auth.User", self.user.pk],
        )
        f.close()
        content = "lorem ipsum dolor sit amet"
        response = post(
            self.client,
            "questions.reply",
            {"content": content},
            args=[self.question.id],
        )
        eq_(200, response.status_code)
        new_answer = self.question.answers.order_by("-id")[0]
        eq_(1, new_answer.images.count())
        image = new_answer.images.all()[0]
        # Uploaded files are renamed to a hash-based name.
        name = "098f6b.png"
        message = 'File name "%s" does not contain "%s"' % (image.file.name, name)
        assert name in image.file.name, message
        eq_(self.user.username, image.creator.username)
        # Clean up
        ImageAttachment.objects.all().delete()
    def test_solve_unsolve(self):
        """Test accepting a solution and undoing."""
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc("div.solution")))
        ans = self.question.answers.all()[0]
        # Sign in as asker, solve and verify
        self.client.login(username=self.question.creator.username, password="testpass")
        response = post(self.client, "questions.solve", args=[self.question.id, ans.id])
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(1, len(doc("div.solution")))
        div = doc("h3.is-solution")[0].getparent().getparent()
        eq_("answer-%s" % ans.id, div.attrib["id"])
        q = Question.objects.get(pk=self.question.id)
        eq_(q.solution, ans)
        eq_(q.solver, self.question.creator)
        # Try to solve again with different answer. It shouldn't blow up or
        # change the solution.
        AnswerFactory(question=q)
        response = post(self.client, "questions.solve", args=[self.question.id, ans.id])
        eq_(200, response.status_code)
        q = Question.objects.get(pk=self.question.id)
        eq_(q.solution, ans)
        # Unsolve and verify
        response = post(
            self.client, "questions.unsolve", args=[self.question.id, ans.id]
        )
        q = Question.objects.get(pk=self.question.id)
        eq_(q.solution, None)
        eq_(q.solver, None)
    def test_only_owner_or_admin_can_solve_unsolve(self):
        """Make sure non-owner/non-admin can't solve/unsolve."""
        # Try as asker
        self.client.login(username=self.question.creator.username, password="testpass")
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc('input[name="solution"]')))
        self.client.logout()
        # Try as a nobody
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc('input[name="solution"]')))
        ans = self.question.answers.all()[0]
        # Try to solve
        response = post(self.client, "questions.solve", args=[self.question.id, ans.id])
        eq_(403, response.status_code)
        # Try to unsolve
        response = post(
            self.client, "questions.unsolve", args=[self.question.id, ans.id]
        )
        eq_(403, response.status_code)
    def test_solve_unsolve_with_perm(self):
        """Test marking solve/unsolve with 'change_solution' permission."""
        u = UserFactory()
        add_permission(u, Question, "change_solution")
        self.client.login(username=u.username, password="testpass")
        ans = self.question.answers.all()[0]
        # Solve and verify
        post(self.client, "questions.solve", args=[self.question.id, ans.id])
        q = Question.objects.get(pk=self.question.id)
        eq_(q.solution, ans)
        # The permission holder, not the asker, is recorded as solver.
        eq_(q.solver, u)
        # Unsolve and verify
        post(self.client, "questions.unsolve", args=[self.question.id, ans.id])
        q = Question.objects.get(pk=self.question.id)
        eq_(q.solution, None)
        eq_(q.solver, None)
    def test_needs_info_checkbox(self):
        """Test that needs info checkbox is correctly shown"""
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc('input[name="needsinfo"]')))
        # Once flagged, the form offers to clear the flag instead.
        self.question.set_needs_info()
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc('input[name="clear_needsinfo"]')))
    def test_question_vote_GET(self):
        """Attempting to vote with HTTP GET returns a 405."""
        response = get(self.client, "questions.vote", args=[self.question.id])
        eq_(405, response.status_code)
    def common_vote(self, me_too_count=1):
        """Helper method for question vote tests.

        me_too_count: number of "me too" vote forms expected before
        voting (anonymous pages render an extra form).
        """
        # Check that there are no votes and vote form renders
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        assert "0" in doc(".have-problem")[0].text
        eq_(me_too_count, len(doc("div.me-too form")))
        # Vote
        ua = "Mozilla/5.0 (DjangoTestClient)"
        self.client.post(
            reverse("questions.vote", args=[self.question.id]), {}, HTTP_USER_AGENT=ua
        )
        # Check that there is 1 vote and vote form doesn't render
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        assert "1" in doc(".have-problem")[0].text
        eq_(0, len(doc("div.me-too form")))
        # Verify user agent
        vote_meta = VoteMetadata.objects.all()[0]
        eq_("ua", vote_meta.key)
        eq_(ua, vote_meta.value)
        # Voting again (same user) should not increment vote count
        post(self.client, "questions.vote", args=[self.question.id])
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        assert "1" in doc(".have-problem")[0].text
    def test_question_authenticated_vote(self):
        """Authenticated user vote."""
        # Common vote test
        self.common_vote()
    def test_question_anonymous_vote(self):
        """Anonymous user vote."""
        # Log out
        self.client.logout()
        # Common vote test
        self.common_vote(2)
    def common_answer_vote(self):
        """Helper method for answer vote tests."""
        # Check that there are no votes and vote form renders
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc('form.helpful button[name="helpful"]')))
        # Vote
        ua = "Mozilla/5.0 (DjangoTestClient)"
        self.client.post(
            reverse("questions.answer_vote", args=[self.question.id, self.answer.id]),
            {"helpful": "y"},
            HTTP_USER_AGENT=ua,
        )
        # Check that there is 1 vote and vote form doesn't render
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc("#answer-%s span.is-helpful" % self.answer.id)))
        eq_(0, len(doc('form.helpful input[name="helpful"]')))
        # Verify user agent
        vote_meta = VoteMetadata.objects.all()[0]
        eq_("ua", vote_meta.key)
        eq_(ua, vote_meta.value)
    def test_answer_authenticated_vote(self):
        """Authenticated user answer vote."""
        # log in as new user (didn't ask or answer question)
        self.client.logout()
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        # Common vote test
        self.common_answer_vote()
    def test_answer_anonymous_vote(self):
        """Anonymous user answer vote."""
        # Log out
        self.client.logout()
        # Common vote test
        self.common_answer_vote()
    def test_can_vote_on_asker_reply(self):
        """An answer posted by the asker can be voted on."""
        self.client.logout()
        # Post a new answer by the asker => two votable answers
        q = self.question
        Answer.objects.create(question=q, creator=q.creator, content="test")
        response = get(self.client, "questions.details", args=[q.id])
        doc = pq(response.content)
        eq_(2, len(doc('form.helpful button[name="helpful"]')))
    def test_asker_can_vote(self):
        """The asker can vote Not/Helpful."""
        self.client.login(username=self.question.creator.username, password="testpass")
        self.common_answer_vote()
    def test_can_solve_with_answer_by_asker(self):
        """An answer posted by the asker can be the solution."""
        self.client.login(username=self.question.creator.username, password="testpass")
        # Post a new answer by the asker => two solvable answers
        q = self.question
        Answer.objects.create(question=q, creator=q.creator, content="test")
        response = get(self.client, "questions.details", args=[q.id])
        doc = pq(response.content)
        eq_(2, len(doc('form.solution input[name="solution"]')))
    def test_delete_question_without_permissions(self):
        """Deleting a question without permissions is a 403."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.delete", args=[self.question.id])
        eq_(403, response.status_code)
        response = post(self.client, "questions.delete", args=[self.question.id])
        eq_(403, response.status_code)
    def test_delete_question_logged_out(self):
        """Deleting a question while logged out redirects to login."""
        self.client.logout()
        response = get(self.client, "questions.delete", args=[self.question.id])
        redirect = response.redirect_chain[0]
        eq_(302, redirect[1])
        eq_(
            "/%s%s?next=/en-US/questions/%s/delete"
            % (settings.LANGUAGE_CODE, settings.LOGIN_URL, self.question.id),
            redirect[0],
        )
        response = post(self.client, "questions.delete", args=[self.question.id])
        redirect = response.redirect_chain[0]
        eq_(302, redirect[1])
        eq_(
            "/%s%s?next=/en-US/questions/%s/delete"
            % (settings.LANGUAGE_CODE, settings.LOGIN_URL, self.question.id),
            redirect[0],
        )
    def test_delete_question_with_permissions(self):
        """Deleting a question with permissions."""
        u = UserFactory()
        add_permission(u, Question, "delete_question")
        self.client.login(username=u.username, password="testpass")
        # GET shows the confirmation page; POST performs the delete.
        response = get(self.client, "questions.delete", args=[self.question.id])
        eq_(200, response.status_code)
        response = post(self.client, "questions.delete", args=[self.question.id])
        eq_(0, Question.objects.filter(pk=self.question.id).count())
    def test_delete_answer_without_permissions(self):
        """Deleting an answer without permissions sends 403."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        ans = self.question.last_answer
        response = get(
            self.client, "questions.delete_answer", args=[self.question.id, ans.id]
        )
        eq_(403, response.status_code)
        response = post(
            self.client, "questions.delete_answer", args=[self.question.id, ans.id]
        )
        eq_(403, response.status_code)
    def test_delete_answer_logged_out(self):
        """Deleting an answer while logged out redirects to login."""
        self.client.logout()
        q = self.question
        ans = q.last_answer
        response = get(
            self.client, "questions.delete_answer", args=[self.question.id, ans.id]
        )
        redirect = response.redirect_chain[0]
        eq_(302, redirect[1])
        eq_(
            "/%s%s?next=/en-US/questions/%s/delete/%s"
            % (settings.LANGUAGE_CODE, settings.LOGIN_URL, q.id, ans.id),
            redirect[0],
        )
        response = post(
            self.client, "questions.delete_answer", args=[self.question.id, ans.id]
        )
        redirect = response.redirect_chain[0]
        eq_(302, redirect[1])
        eq_(
            "/%s%s?next=/en-US/questions/%s/delete/%s"
            % (settings.LANGUAGE_CODE, settings.LOGIN_URL, q.id, ans.id),
            redirect[0],
        )
    def test_delete_answer_with_permissions(self):
        """Deleting an answer with permissions."""
        ans = self.question.last_answer
        u = UserFactory()
        add_permission(u, Answer, "delete_answer")
        self.client.login(username=u.username, password="testpass")
        response = get(
            self.client, "questions.delete_answer", args=[self.question.id, ans.id]
        )
        eq_(200, response.status_code)
        response = post(
            self.client, "questions.delete_answer", args=[self.question.id, ans.id]
        )
        eq_(0, Answer.objects.filter(pk=self.question.id).count())
    def test_edit_answer_without_permission(self):
        """Editing an answer without permissions returns a 403.
        The edit link shouldn't show up on the Answers page."""
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc("ol.answers li.edit")))
        answer = self.question.last_answer
        response = get(
            self.client, "questions.edit_answer", args=[self.question.id, answer.id]
        )
        eq_(403, response.status_code)
        content = "New content for answer"
        response = post(
            self.client,
            "questions.edit_answer",
            {"content": content},
            args=[self.question.id, answer.id],
        )
        eq_(403, response.status_code)
    def test_edit_answer_with_permissions(self):
        """Editing an answer with permissions.
        The edit link should show up on the Answers page."""
        u = UserFactory()
        add_permission(u, Answer, "change_answer")
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc("li.edit")))
        answer = self.question.last_answer
        response = get(
            self.client, "questions.edit_answer", args=[self.question.id, answer.id]
        )
        eq_(200, response.status_code)
        content = "New content for answer"
        response = post(
            self.client,
            "questions.edit_answer",
            {"content": content},
            args=[self.question.id, answer.id],
        )
        eq_(content, Answer.objects.get(pk=answer.id).content)
    def test_answer_creator_can_edit(self):
        """The creator of an answer can edit his/her answer."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        # Initially there should be no edit links
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc("ul.mzp-c-menu-list-list li.edit")))
        # Add an answer and verify the edit link shows up
        content = "lorem ipsum dolor sit amet"
        response = post(
            self.client,
            "questions.reply",
            {"content": content},
            args=[self.question.id],
        )
        doc = pq(response.content)
        eq_(1, len(doc("li.edit")))
        new_answer = self.question.answers.order_by("-id")[0]
        eq_(1, len(doc("#answer-%s li.edit" % new_answer.id)))
        # Make sure it can be edited
        content = "New content for answer"
        response = post(
            self.client,
            "questions.edit_answer",
            {"content": content},
            args=[self.question.id, new_answer.id],
        )
        eq_(200, response.status_code)
        # Now lock it and make sure it can't be edited
        self.question.is_locked = True
        self.question.save()
        response = post(
            self.client,
            "questions.edit_answer",
            {"content": content},
            args=[self.question.id, new_answer.id],
        )
        eq_(403, response.status_code)
    def test_lock_question_without_permissions(self):
        """Trying to lock a question without permission is a 403."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        q = self.question
        response = post(self.client, "questions.lock", args=[q.id])
        eq_(403, response.status_code)
    def test_lock_question_logged_out(self):
        """Trying to lock a question while logged out redirects to login."""
        self.client.logout()
        q = self.question
        response = post(self.client, "questions.lock", args=[q.id])
        redirect = response.redirect_chain[0]
        eq_(302, redirect[1])
        eq_(
            "/%s%s?next=/en-US/questions/%s/lock"
            % (settings.LANGUAGE_CODE, settings.LOGIN_URL, q.id),
            redirect[0],
        )
    def test_lock_question_with_permissions_GET(self):
        """Trying to lock a question via HTTP GET."""
        u = UserFactory()
        add_permission(u, Question, "lock_question")
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.lock", args=[self.question.id])
        eq_(405, response.status_code)
    def test_lock_question_with_permissions_POST(self):
        """Locking questions with permissions via HTTP POST."""
        u = UserFactory()
        add_permission(u, Question, "lock_question")
        self.client.login(username=u.username, password="testpass")
        q = self.question
        response = post(self.client, "questions.lock", args=[q.id])
        eq_(200, response.status_code)
        eq_(True, Question.objects.get(pk=q.pk).is_locked)
        assert "This thread was closed." in response.content
        # now unlock it
        # The lock view is a toggle: a second POST unlocks.
        response = post(self.client, "questions.lock", args=[q.id])
        eq_(200, response.status_code)
        eq_(False, Question.objects.get(pk=q.pk).is_locked)
    def test_reply_to_locked_question(self):
        """Locked questions can't be answered."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        # Without add_answer permission, we should 403.
        q = self.question
        q.is_locked = True
        q.save()
        response = post(
            self.client, "questions.reply", {"content": "just testing"}, args=[q.id]
        )
        eq_(403, response.status_code)
        # With add_answer permission, it should work.
        add_permission(u, Answer, "add_answer")
        response = post(
            self.client, "questions.reply", {"content": "just testing"}, args=[q.id]
        )
        eq_(200, response.status_code)
    def test_edit_answer_locked_question(self):
        """Verify edit answer of a locked question only with permissions."""
        self.question.is_locked = True
        self.question.save()
        # The answer creator can't edit if question is locked
        u = self.question.last_answer.creator
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc("li.edit")))
        answer = self.question.last_answer
        response = get(
            self.client, "questions.edit_answer", args=[self.question.id, answer.id]
        )
        eq_(403, response.status_code)
        # A user with edit_answer permission can edit.
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        add_permission(u, Answer, "change_answer")
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc("li.edit")))
        answer = self.question.last_answer
        response = get(
            self.client, "questions.edit_answer", args=[self.question.id, answer.id]
        )
        eq_(200, response.status_code)
        content = "New content for answer"
        response = post(
            self.client,
            "questions.edit_answer",
            {"content": content},
            args=[self.question.id, answer.id],
        )
        eq_(content, Answer.objects.get(pk=answer.id).content)
    def test_vote_locked_question_403(self):
        """Locked questions can't be voted on."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        q = self.question
        q.is_locked = True
        q.save()
        response = post(self.client, "questions.vote", args=[q.id])
        eq_(403, response.status_code)
    def test_vote_answer_to_locked_question_403(self):
        """Answers to locked questions can't be voted on."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        q = self.question
        q.is_locked = True
        q.save()
        response = post(
            self.client,
            "questions.answer_vote",
            {"helpful": "y"},
            args=[q.id, self.answer.id],
        )
        eq_(403, response.status_code)
    def test_watch_GET_405(self):
        """Watch replies with HTTP GET results in 405."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.watch", args=[self.question.id])
        eq_(405, response.status_code)
    def test_unwatch_GET_405(self):
        """Unwatch replies with HTTP GET results in 405."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        response = get(self.client, "questions.unwatch", args=[self.question.id])
        eq_(405, response.status_code)
    def test_watch_replies(self):
        """Watch a question for replies."""
        self.client.logout()
        # Delete existing watches
        Watch.objects.all().delete()
        post(
            self.client,
            "questions.watch",
            {"email": "some@bo.dy", "event_type": "reply"},
            args=[self.question.id],
        )
        assert QuestionReplyEvent.is_notifying(
            "some@bo.dy", self.question
        ), "Watch was not created"
        # Anonymous watches require email confirmation before activating.
        attrs_eq(
            mail.outbox[0],
            to=["some@bo.dy"],
            subject="Please confirm your email address",
        )
        assert "questions/confirm/" in mail.outbox[0].body
        assert "New answers" in mail.outbox[0].body
        # Now activate the watch.
        w = Watch.objects.get()
        get(self.client, "questions.activate_watch", args=[w.id, w.secret])
        assert Watch.objects.get(id=w.id).is_active
    @mock.patch.object(mail.EmailMessage, "send")
    def test_watch_replies_smtp_error(self, emailmessage_send):
        """Watch a question for replies and fail to send email."""
        # Patch EmailMessage.send so the confirmation mail raises SMTPError.
        emailmessage_send.side_effect = emailmessage_raise_smtp
        self.client.logout()
        r = post(
            self.client,
            "questions.watch",
            {"email": "some@bo.dy", "event_type": "reply"},
            args=[self.question.id],
        )
        assert not QuestionReplyEvent.is_notifying(
            "some@bo.dy", self.question
        ), "Watch was created"
        self.assertContains(r, "Could not send a message to that email")
    def test_watch_replies_wrong_secret(self):
        """Watch a question for replies."""
        # This also covers test_watch_solution_wrong_secret.
        self.client.logout()
        # Delete existing watches
        Watch.objects.all().delete()
        post(
            self.client,
            "questions.watch",
            {"email": "some@bo.dy", "event_type": "reply"},
            args=[self.question.id],
        )
        # Now activate the watch.
        # A bad secret must not activate the watch (but still renders 200).
        w = Watch.objects.get()
        r = get(self.client, "questions.activate_watch", args=[w.id, "fail"])
        eq_(200, r.status_code)
        assert not Watch.objects.get(id=w.id).is_active
    def test_watch_replies_logged_in(self):
        """Watch a question for replies (logged in)."""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        u = User.objects.get(username=u.username)
        post(
            self.client,
            "questions.watch",
            {"event_type": "reply"},
            args=[self.question.id],
        )
        assert QuestionReplyEvent.is_notifying(
            u, self.question
        ), "Watch was not created"
        # Returned so test_unwatch can reuse the same subscribed user.
        return u
    def test_watch_solution(self):
        """Watch a question for solution."""
        self.client.logout()
        # Delete existing watches
        Watch.objects.all().delete()
        post(
            self.client,
            "questions.watch",
            {"email": "some@bo.dy", "event_type": "solution"},
            args=[self.question.id],
        )
        assert QuestionSolvedEvent.is_notifying(
            "some@bo.dy", self.question
        ), "Watch was not created"
        attrs_eq(
            mail.outbox[0],
            to=["some@bo.dy"],
            subject="Please confirm your email address",
        )
        assert "questions/confirm/" in mail.outbox[0].body
        assert "Solution found" in mail.outbox[0].body
        # Now activate the watch.
        w = Watch.objects.get()
        get(self.client, "questions.activate_watch", args=[w.id, w.secret])
        assert Watch.objects.get().is_active
    def test_unwatch(self):
        """Unwatch a question."""
        # First watch question.
        u = self.test_watch_replies_logged_in()
        # Then unwatch it.
        self.client.login(username=u.username, password="testpass")
        post(self.client, "questions.unwatch", args=[self.question.id])
        assert not QuestionReplyEvent.is_notifying(
            u, self.question
        ), "Watch was not destroyed"
    def test_watch_solution_and_replies(self):
        """User subscribes to solution and replies: page doesn't break"""
        u = UserFactory()
        self.client.login(username=u.username, password="testpass")
        QuestionReplyEvent.notify(u, self.question)
        QuestionSolvedEvent.notify(u, self.question)
        response = get(self.client, "questions.details", args=[self.question.id])
        eq_(200, response.status_code)
    def test_preview_answer(self):
        """Preview an answer."""
        num_answers = self.question.answers.count()
        content = "Awesome answer."
        # Any non-empty "preview" value triggers preview instead of save.
        response = post(
            self.client,
            "questions.reply",
            {"content": content, "preview": "any string"},
            args=[self.question.id],
        )
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(content, doc("#answer-preview div.content").text())
        eq_(num_answers, self.question.answers.count())
    def test_preview_answer_as_admin(self):
        """Preview an answer as admin and verify response is 200."""
        u = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=u.username, password="testpass")
        content = "Awesome answer."
        response = post(
            self.client,
            "questions.reply",
            {"content": content, "preview": "any string"},
            args=[self.question.id],
        )
        eq_(200, response.status_code)
    def test_links_nofollow(self):
        """Links posted in questions and answers should have rel=nofollow."""
        q = self.question
        q.content = "lorem http://ipsum.com"
        q.save()
        a = self.answer
        a.content = "testing http://example.com"
        a.save()
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_("nofollow", doc(".question .main-content a")[0].attrib["rel"])
        eq_("nofollow", doc(".answer .main-content a")[0].attrib["rel"])
    def test_robots_noindex_unsolved(self):
        """Verify noindex on unsolved questions."""
        q = QuestionFactory()
        # A brand new questions should be noindexed...
        response = get(self.client, "questions.details", args=[q.id])
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(1, len(doc("meta[name=robots]")))
        # If it has one answer, it should still be noindexed...
        a = AnswerFactory(question=q)
        response = get(self.client, "questions.details", args=[q.id])
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(1, len(doc("meta[name=robots]")))
        # If the answer is the solution, then it shouldn't be noindexed
        # anymore.
        q.solution = a
        q.save()
        response = get(self.client, "questions.details", args=[q.id])
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(0, len(doc("meta[name=robots]")))
class TaggingViewTestsAsTagger(TestCaseBase):
    """Tests for views that add and remove tags, logged in as someone who can
    add and remove but not create tags
    Also hits the tag-related parts of the answer template.
    """
    def setUp(self):
        super(TaggingViewTestsAsTagger, self).setUp()
        # tag_question lets the user apply/remove existing tags but the
        # missing Tag.add_tag permission prevents creating new ones.
        u = UserFactory()
        add_permission(u, Question, "tag_question")
        self.client.login(username=u.username, password="testpass")
        self.question = QuestionFactory()
    # add_tag view:
    def test_add_tag_get_method(self):
        """Assert GETting the add_tag view redirects to the answers page."""
        response = self.client.get(_add_tag_url(self.question.id))
        url = "%s" % reverse(
            "questions.details",
            kwargs={"question_id": self.question.id},
            force_locale=True,
        )
        self.assertRedirects(response, url)
    def test_add_nonexistent_tag(self):
        """Assert adding a nonexistent tag sychronously shows an error."""
        response = self.client.post(
            _add_tag_url(self.question.id), data={"tag-name": "nonexistent tag"}
        )
        self.assertContains(response, UNAPPROVED_TAG)
    def test_add_existent_tag(self):
        """Test adding a tag, case insensitivity, and space stripping."""
        # Mixed-case name with surrounding spaces must resolve to the
        # canonical lowercase slug.
        TagFactory(name="PURplepurplepurple", slug="purplepurplepurple")
        response = self.client.post(
            _add_tag_url(self.question.id),
            data={"tag-name": " PURplepurplepurple "},
            follow=True,
        )
        self.assertContains(response, "purplepurplepurple")
    def test_add_no_tag(self):
        """Make sure adding a blank tag shows an error message."""
        response = self.client.post(
            _add_tag_url(self.question.id), data={"tag-name": ""}
        )
        self.assertContains(response, NO_TAG)
    # add_tag_async view:
    def test_add_async_nonexistent_tag(self):
        """Assert adding an nonexistent tag yields an AJAX error."""
        response = self.client.post(
            _add_async_tag_url(self.question.id),
            data={"tag-name": "nonexistent tag"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertContains(response, UNAPPROVED_TAG, status_code=400)
    def test_add_async_existent_tag(self):
        """Assert adding an unapplied tag."""
        TagFactory(name="purplepurplepurple", slug="purplepurplepurple")
        response = self.client.post(
            _add_async_tag_url(self.question.id),
            data={"tag-name": " PURplepurplepurple "},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertContains(response, "canonicalName")
        tags = Question.objects.get(id=self.question.id).tags.all()
        # Test the backend since we don't have a newly rendered page to
        # rely on.
        eq_([t.name for t in tags], ["purplepurplepurple"])
    def test_add_async_no_tag(self):
        """Assert adding an empty tag asynchronously yields an AJAX error."""
        response = self.client.post(
            _add_async_tag_url(self.question.id),
            data={"tag-name": ""},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertContains(response, NO_TAG, status_code=400)
    # remove_tag view:
    def test_remove_applied_tag(self):
        """Assert removing an applied tag succeeds."""
        self.question.tags.add("green")
        self.question.tags.add("colorless")
        # The tag to remove is encoded in the POSTed field name.
        response = self.client.post(
            _remove_tag_url(self.question.id), data={"remove-tag-colorless": "dummy"}
        )
        self._assert_redirects_to_question(response, self.question.id)
        tags = Question.objects.get(pk=self.question.id).tags.all()
        eq_([t.name for t in tags], ["green"])
    def test_remove_unapplied_tag(self):
        """Test removing an unapplied tag fails silently."""
        response = self.client.post(
            _remove_tag_url(self.question.id), data={"remove-tag-lemon": "dummy"}
        )
        self._assert_redirects_to_question(response, self.question.id)
    def test_remove_no_tag(self):
        """Make sure removing with no params provided redirects harmlessly."""
        response = self.client.post(_remove_tag_url(self.question.id), data={})
        self._assert_redirects_to_question(response, self.question.id)
    def _assert_redirects_to_question(self, response, question_id):
        # Shared assertion: remove_tag always bounces back to the
        # question detail page.
        url = "%s" % reverse(
            "questions.details", kwargs={"question_id": question_id}, force_locale=True
        )
        self.assertRedirects(response, url)
    # remove_tag_async view:
    def test_remove_async_applied_tag(self):
        """Assert taking a tag off a question works."""
        self.question.tags.add("green")
        self.question.tags.add("colorless")
        response = self.client.post(
            _remove_async_tag_url(self.question.id),
            data={"name": "colorless"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        eq_(response.status_code, 200)
        tags = Question.objects.get(pk=self.question.id).tags.all()
        eq_([t.name for t in tags], ["green"])
    def test_remove_async_unapplied_tag(self):
        """Assert trying to remove a tag that isn't there succeeds."""
        response = self.client.post(
            _remove_async_tag_url(self.question.id),
            data={"name": "lemon"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        eq_(response.status_code, 200)
    def test_remove_async_no_tag(self):
        """Assert calling the remove handler with no param fails."""
        response = self.client.post(
            _remove_async_tag_url(self.question.id),
            data={},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertContains(response, NO_TAG, status_code=400)
    @mock.patch.object(kitsune.questions.tasks, "submit_ticket")
    def test_escalate_tag(self, submit_ticket):
        """Verify that tagging a question "escalate" submits to zendesk."""
        # submit_ticket is mocked out so no real Zendesk call happens;
        # we assert only on the arguments it would receive.
        TagFactory(name="escalate", slug="escalate")
        self.client.post(
            _add_tag_url(self.question.id), data={"tag-name": "escalate"}, follow=True
        )
        question_url = u"https://example.com/en-US{url}".format(
            url=self.question.get_absolute_url()
        )
        submit_ticket.assert_called_with(
            email="support@mozilla.com",
            category="Escalated",
            subject=u"[Escalated] {title}".format(title=self.question.title),
            body=u"{url}\n\n{content}".format(
                url=question_url, content=self.question.content
            ),
            tags=["escalate"],
        )
class TaggingViewTestsAsAdmin(TestCaseBase):
    """Tests for views that create new tags, logged in as someone who can"""
    def setUp(self):
        super(TaggingViewTestsAsAdmin, self).setUp()
        # Unlike TaggingViewTestsAsTagger, this user also holds the
        # Tag.add_tag permission, so unknown tag names get created.
        u = UserFactory()
        add_permission(u, Question, "tag_question")
        add_permission(u, Tag, "add_tag")
        self.client.login(username=u.username, password="testpass")
        self.question = QuestionFactory()
        TagFactory(name="red", slug="red")
    def test_add_new_tag(self):
        """Assert adding a nonexistent tag sychronously creates & adds it."""
        self.client.post(
            _add_tag_url(self.question.id), data={"tag-name": "nonexistent tag"}
        )
        tags_eq(Question.objects.get(id=self.question.id), ["nonexistent tag"])
    def test_add_async_new_tag(self):
        """Assert adding an nonexistent tag creates & adds it."""
        response = self.client.post(
            _add_async_tag_url(self.question.id),
            data={"tag-name": "nonexistent tag"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        eq_(response.status_code, 200)
        tags_eq(Question.objects.get(id=self.question.id), ["nonexistent tag"])
    def test_add_new_case_insensitive(self):
        """Adding a tag differing only in case from existing ones shouldn't
        create a new tag."""
        self.client.post(
            _add_async_tag_url(self.question.id),
            data={"tag-name": "RED"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        tags_eq(Question.objects.get(id=self.question.id), ["red"])
    def test_add_new_canonicalizes(self):
        """Adding a new tag as an admin should still canonicalize case."""
        response = self.client.post(
            _add_async_tag_url(self.question.id),
            data={"tag-name": "RED"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        # The JSON payload reports the canonical (existing) tag name.
        eq_(json.loads(response.content)["canonicalName"], "red")
def _add_tag_url(question_id):
    """Return the URL of the add_tag view for the given question."""
    return reverse("questions.add_tag", kwargs={"question_id": question_id})
def _add_async_tag_url(question_id):
    """Return the URL of the add_tag_async view for the given question."""
    return reverse("questions.add_tag_async", kwargs={"question_id": question_id})
def _remove_tag_url(question_id):
    """Return the URL of the remove_tag view for the given question."""
    return reverse("questions.remove_tag", kwargs={"question_id": question_id})
def _remove_async_tag_url(question_id):
    """Return the URL of the remove_tag_async view for the given question."""
    return reverse("questions.remove_tag_async", kwargs={"question_id": question_id})
class QuestionsTemplateTestCase(TestCaseBase):
    """Tests for the question-list template: filters, tags, and rendering."""
    def test_tagged(self):
        """Filtering the list by tag only shows questions with that tag."""
        u = UserFactory()
        add_permission(u, Question, "tag_question")
        tagname = "mobile"
        TagFactory(name=tagname, slug=tagname)
        self.client.login(username=u.username, password="testpass")
        tagged = urlparams(
            reverse("questions.list", args=["all"]), tagged=tagname, show="all"
        )
        # First there should be no questions tagged 'mobile'
        response = self.client.get(tagged)
        doc = pq(response.content)
        eq_(0, len(doc(".forum--question-item")))
        # Tag a question 'mobile'
        q = QuestionFactory()
        response = post(
            self.client, "questions.add_tag", {"tag-name": tagname}, args=[q.id]
        )
        eq_(200, response.status_code)
        # Add an answer
        AnswerFactory(question=q)
        # Now there should be 1 question tagged 'mobile'
        response = self.client.get(tagged)
        doc = pq(response.content)
        eq_(1, len(doc(".forum--question-item")))
        # The canonical link must include the tag/show query parameters.
        eq_(
            "%s/en-US/questions/all?tagged=mobile&show=all" % settings.CANONICAL_URL,
            doc('link[rel="canonical"]')[0].attrib["href"],
        )
        # Test a tag that doesn't exist. It shouldnt blow up.
        url = urlparams(
            reverse("questions.list", args=["all"]), tagged="garbage-plate", show="all"
        )
        response = self.client.get(url)
        eq_(200, response.status_code)
    def test_owner_tab_selected_in_list(self):
        """Exactly one owner tab is selected for every ``show`` argument."""
        # Test one tab is selected for no show arg specified
        questions_list = urlparams(reverse("questions.list", args=["all"]))
        response = self.client.get(questions_list)
        doc = pq(response.content)
        eq_(1, len(doc("#owner-tabs .selected")))
        # Test one tab is selected for all show args
        show_args = ["needs-attention", "responded", "done", "all"]
        for show_arg in show_args:
            questions_list = urlparams(
                reverse("questions.list", args=["all"]), show=show_arg
            )
            response = self.client.get(questions_list)
            doc = pq(response.content)
            eq_(1, len(doc("#owner-tabs .selected")))
    def test_product_filter(self):
        """The product slug(s) in the URL restrict which questions appear."""
        p1 = ProductFactory()
        p2 = ProductFactory()
        p3 = ProductFactory()
        q1 = QuestionFactory()
        q2 = QuestionFactory(product=p1)
        q2.save()
        q3 = QuestionFactory(product=p2)
        q3.save()
        def check(product, expected):
            # Fetch the list for *product* and assert *expected* questions.
            url = reverse("questions.list", args=[product])
            response = self.client.get(url)
            doc = pq(response.content)
            # Make sure all questions are there.
            # This won't work, because the test case base adds more tests than
            # we expect in it's setUp(). TODO: Fix that.
            eq_(len(expected), len(doc(".forum--question-item")))
            for q in expected:
                eq_(1, len(doc(".forum--question-item[id=question-%s]" % q.id)))
        # No filtering -> All questions.
        check("all", [q1, q2, q3])
        # Filter on p1 -> only q2
        check(p1.slug, [q2])
        # Filter on p2 -> only q3
        check(p2.slug, [q3])
        # Filter on p3 -> No results
        check(p3.slug, [])
        # Filter on p1,p2
        check("%s,%s" % (p1.slug, p2.slug), [q2, q3])
        # Filter on p1,p3
        check("%s,%s" % (p1.slug, p3.slug), [q2])
        # Filter on p2,p3
        check("%s,%s" % (p2.slug, p3.slug), [q3])
    def test_topic_filter(self):
        """The ``topic`` query parameter restricts which questions appear."""
        p = ProductFactory()
        t1 = TopicFactory(product=p)
        t2 = TopicFactory(product=p)
        t3 = TopicFactory(product=p)
        q1 = QuestionFactory()
        q2 = QuestionFactory(topic=t1)
        q3 = QuestionFactory(topic=t2)
        url = reverse("questions.list", args=["all"])
        def check(filter, expected):
            # Fetch the list with *filter* query params, assert *expected*.
            response = self.client.get(urlparams(url, **filter))
            doc = pq(response.content)
            # Make sure all questions are there.
            # This won't work, because the test case base adds more tests than
            # we expect in it's setUp(). TODO: Fix that.
            # eq_(len(expected), len(doc('.forum--question-item')))
            for q in expected:
                eq_(1, len(doc(".forum--question-item[id=question-%s]" % q.id)))
        # No filtering -> All questions.
        check({}, [q1, q2, q3])
        # Filter on p1 -> only q2
        check({"topic": t1.slug}, [q2])
        # Filter on p2 -> only q3
        check({"topic": t2.slug}, [q3])
        # Filter on p3 -> No results
        check({"topic": t3.slug}, [])
    def test_robots_noindex(self):
        """Verify the page is set for noindex by robots."""
        response = self.client.get(reverse("questions.list", args=["all"]))
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(1, len(doc("meta[name=robots]")))
    def test_select_in_question(self):
        """Verify we properly escape <select/>."""
        QuestionFactory(
            title="test question lorem ipsum <select></select>",
            content="test question content lorem ipsum <select></select>",
        )
        response = self.client.get(reverse("questions.list", args=["all"]))
        assert "test question lorem ipsum" in response.content
        assert "test question content lorem ipsum" in response.content
        doc = pq(response.content)
        # The markup must have been escaped, not rendered as an element.
        eq_(0, len(doc("article.questions select")))
    def test_truncated_text_is_stripped(self):
        """Verify we strip html from truncated text."""
        long_str = "".join(random.choice(letters) for x in xrange(170))
        QuestionFactory(content="<p>%s</p>" % long_str)
        response = self.client.get(reverse("questions.list", args=["all"]))
        # Verify that the <p> was stripped
        assert '<p class="short-text"><p>' not in response.content
        assert '<p class="short-text">%s' % long_str[:5] in response.content
    def test_views(self):
        """Verify the view count is displayed correctly."""
        q = QuestionFactory()
        q.questionvisits_set.create(visits=1007)
        response = self.client.get(reverse("questions.list", args=["all"]))
        doc = pq(response.content)
        eq_("1007", doc(".views-val").text())
    def test_no_unarchive_on_old_questions(self):
        """Archived old questions must not offer an un-archive action."""
        ques = QuestionFactory(
            created=(datetime.now() - timedelta(days=200)), is_archived=True
        )
        response = get(self.client, "questions.details", args=[ques.id])
        assert "Archive this post" not in response.content
    def test_show_is_empty_string_doesnt_500(self):
        """An empty ``show`` query parameter must not crash the view."""
        QuestionFactory()
        response = self.client.get(
            urlparams(reverse("questions.list", args=["all"]), show="")
        )
        eq_(200, response.status_code)
    def test_product_shows_without_tags(self):
        """The product title renders in the tag list even with no tags."""
        p = ProductFactory()
        t = TopicFactory(product=p)
        q = QuestionFactory(topic=t)
        response = self.client.get(
            urlparams(reverse("questions.list", args=["all"]), show="")
        )
        doc = pq(response.content)
        tag = doc("#question-{id} .tag-list li a".format(id=q.id))
        # Even though there are no tags, the product should be displayed.
        assert p.title in tag[0].text
class QuestionsTemplateTestCaseNoFixtures(TestCase):
    """Question-list tests that create all of their own data."""

    client_class = LocalizingClient

    def test_locked_questions_dont_appear(self):
        """Locked questions are not listed on the no-replies list."""
        QuestionFactory()
        QuestionFactory()
        QuestionFactory(is_locked=True)

        listing_url = urlparams(
            reverse("questions.list", args=["all"]), filter="no-replies"
        )
        page = pq(self.client.get(listing_url).content)
        # Only the two unlocked questions should be listed.
        eq_(2, len(page(".forum--question-item")))
class QuestionEditingTests(TestCaseBase):
    """Tests for the question-editing view and templates"""
    def setUp(self):
        super(QuestionEditingTests, self).setUp()
        # Log in as a user with permission to edit any question.
        self.user = UserFactory()
        add_permission(self.user, Question, "change_question")
        self.client.login(username=self.user.username, password="testpass")
    def test_extra_fields(self):
        """The edit-question form should show appropriate metadata fields."""
        question_id = QuestionFactory().id
        response = get(
            self.client, "questions.edit_question", kwargs={"question_id": question_id}
        )
        eq_(response.status_code, 200)
        # Make sure each extra metadata field is in the form:
        doc = pq(response.content)
        q = Question.objects.get(pk=question_id)
        # Extra fields come from both the product and category config.
        extra_fields = q.product_config.get("extra_fields", []) + q.category_config.get(
            "extra_fields", []
        )
        for field in extra_fields:
            assert doc("input[name=%s]" % field) or doc("textarea[name=%s]" % field), (
                "The %s field is missing from the edit page." % field
            )
    def test_no_extra_fields(self):
        """The edit-question form shouldn't show inappropriate metadata."""
        question_id = QuestionFactory().id
        response = get(
            self.client, "questions.edit_question", kwargs={"question_id": question_id}
        )
        eq_(response.status_code, 200)
        # Take the "os" field as representative. Make sure it doesn't show up:
        doc = pq(response.content)
        assert not doc("input[name=os]")
    def test_post(self):
        """Posting a valid edit form should save the question."""
        p = ProductFactory(slug="desktop")
        q = QuestionFactory(product=p)
        response = post(
            self.client,
            "questions.edit_question",
            {
                "title": "New title",
                "content": "New content",
                "ff_version": "New version",
            },
            kwargs={"question_id": q.id},
        )
        # Make sure the form redirects and thus appears to succeed:
        url = "%s" % reverse(
            "questions.details", kwargs={"question_id": q.id}, force_locale=True
        )
        self.assertRedirects(response, url)
        # Make sure the static fields, the metadata, and the updated_by field
        # changed:
        q = Question.objects.get(pk=q.id)
        eq_(q.title, "New title")
        eq_(q.content, "New content")
        eq_(q.updated_by, self.user)
class AAQTemplateTestCase(TestCaseBase):
    """Test the AAQ template."""
    # Canned form payload used by _post_new_question; the troubleshooting
    # value is raw JSON as submitted by the in-product data collector.
    data = {
        "title": "A test question",
        "content": "I have this question that I hope...",
        "category": "fix-problems",
        "sites_affected": "http://example.com",
        "ff_version": "3.6.6",
        "os": "Intel Mac OS X 10.6",
        "plugins": "* Shockwave Flash 10.1 r53",
        "useragent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8.3) "
        "Gecko/20120221 Firefox/18.0",
        "troubleshooting": """{
            "accessibility": {
                "isActive": true
            },
            "application": {
                "name": "Firefox",
                "supportURL": "Some random url.",
                "userAgent": "A user agent.",
                "version": "18.0.2"
            },
            "extensions": [],
            "graphics": {},
            "javaScript": {},
            "modifiedPreferences": {
                "print.macosx.pagesetup": "QWERTY",
                "print.macosx.pagesetup-2": "POIUYT"
            },
            "userJS": {
                "exists": false
            }
        }""",
    }
    def setUp(self):
        super(AAQTemplateTestCase, self).setUp()
        self.user = UserFactory()
        self.client.login(username=self.user.username, password="testpass")
    def _post_new_question(self, locale=None):
        """Post a new question and return the response."""
        p = ProductFactory(title="Firefox", slug="firefox")
        for l in QuestionLocale.objects.all():
            p.questions_locales.add(l)
        TopicFactory(slug="fix-problems", product=p)
        extra = {}
        if locale is not None:
            extra["locale"] = locale
        url = urlparams(
            reverse("questions.aaq_step3", args=["desktop"], **extra)
        )
        # Set 'in-aaq' for the session. It isn't already set because this
        # test doesn't do a GET of the form first.
        s = self.client.session
        s["in-aaq"] = True
        s.save()
        return self.client.post(url, self.data, follow=True)
    def test_full_workflow(self):
        """A full AAQ submission creates and lists the question correctly."""
        response = self._post_new_question()
        eq_(200, response.status_code)
        assert "Done!" in pq(response.content)("ul.user-messages li").text()
        # Verify question is in db now
        question = Question.objects.filter(title="A test question")[0]
        # Make sure question is in questions list
        response = self.client.get(reverse("questions.list", args=["all"]))
        doc = pq(response.content)
        eq_(1, len(doc("#question-%s" % question.id)))
        # And no email was sent
        eq_(0, len(mail.outbox))
        # Verify product and topic assigned to question.
        eq_("fix-problems", question.topic.slug)
        eq_("firefox", question.product.slug)
        # Verify troubleshooting information
        troubleshooting = question.metadata["troubleshooting"]
        assert "modifiedPreferences" in troubleshooting
        # Sensitive print.macosx prefs must be scrubbed on the way in.
        assert "print.macosx" not in troubleshooting
        # Verify firefox version
        version = question.metadata["ff_version"]
        eq_("18.0.2", version)
    def test_full_workflow_inactive(self):
        """
        Test that an inactive user cannot create a new question
        """
        u = self.user
        u.is_active = False
        u.save()
        self._post_new_question()
        eq_(0, Question.objects.count())
    def test_localized_creation(self):
        """Submitting with a locale stores the question under that locale."""
        response = self._post_new_question(locale="pt-BR")
        eq_(200, response.status_code)
        assert "Done!" in pq(response.content)("ul.user-messages li").text()
        # Verify question is in db now
        question = Question.objects.filter(title="A test question")[0]
        eq_(question.locale, "pt-BR")
    def test_invalid_product_404(self):
        """An unknown product slug in step 2 returns a 404."""
        url = reverse("questions.aaq_step2", args=["lipsum"])
        response = self.client.get(url)
        eq_(404, response.status_code)
    def test_invalid_category_302(self):
        """An unknown category in step 3 redirects instead of crashing."""
        ProductFactory(slug="firefox")
        url = reverse("questions.aaq_step3", args=["desktop", "lipsum"])
        response = self.client.get(url)
        eq_(302, response.status_code)
class ProductForumTemplateTestCase(TestCaseBase):
    """Tests for the product listing on the questions home page."""

    def test_product_forum_listing(self):
        """Every product is listed; AAQ-enabled ones show their titles."""
        firefox = ProductFactory(title="Firefox", slug="firefox")
        android = ProductFactory(title="Firefox for Android", slug="mobile")
        fxos = ProductFactory(title="Firefox OS", slug="firefox-os")
        openbadges = ProductFactory(title="Open Badges", slug="open-badges")

        # Enable the default locale for every product except Open Badges.
        lcl = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
        for product in (firefox, android, fxos):
            product.questions_locales.add(lcl)

        response = self.client.get(reverse("questions.home"))
        eq_(200, response.status_code)

        doc = pq(response.content)
        eq_(4, len(doc(".product-list .product")))

        listing_html = doc(".product-list").html()
        for product in (firefox, android, fxos):
            assert product.title in listing_html
        assert openbadges.title not in listing_html
class RelatedThingsTestCase(ElasticTestCase):
    """Tests for the related questions/documents shown on a question page."""
    def setUp(self):
        super(RelatedThingsTestCase, self).setUp()
        # The question whose detail page is inspected by every test.
        self.question = QuestionFactory(
            title="lorem ipsum", content="lorem", product=ProductFactory()
        )
    def test_related_questions(self):
        """Only same-product questions with helpful answers are related."""
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc("#related-content .related-question")))
        # Same product, has a helpful answer -> should be related.
        q1 = QuestionFactory(
            title="lorem ipsum dolor", content="lorem", product=self.question.product
        )
        a1 = AnswerFactory(question=q1)
        AnswerVoteFactory(answer=a1, helpful=True)
        # Questions with no helpful answers should not be shown
        q2 = QuestionFactory(
            title="lorem ipsum dolor", content="lorem", product=self.question.product
        )
        AnswerFactory(question=q2)
        # Questions that belong to different products should not be shown
        q3 = QuestionFactory(
            title="lorem ipsum dolor", content="lorem", product=ProductFactory()
        )
        a3 = AnswerFactory(question=q3)
        AnswerVoteFactory(answer=a3, helpful=True)
        # Drop cached related-content and re-index before re-checking.
        cache.clear()
        self.refresh()
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc("#related-content .related-question")))
    def test_related_documents(self):
        """KB documents for the same product appear as related documents."""
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(0, len(doc("#related-content .related-document")))
        d1 = DocumentFactory(title="lorem ipsum")
        d1.products.add(self.question.product)
        r1 = ApprovedRevisionFactory(
            document=d1, summary="lorem", content="lorem ipsum dolor"
        )
        d1.current_revision = r1
        d1.save()
        # Drop cached related-content and re-index before re-checking.
        cache.clear()
        self.refresh()
        response = get(self.client, "questions.details", args=[self.question.id])
        doc = pq(response.content)
        eq_(1, len(doc("#related-content .related-document")))
| 37.428123 | 88 | 0.610033 |
b12785af310c77383da37aa2ffb2e993c62209d4 | 881 | py | Python | hottbox/algorithms/classification/tests/test_ensemble_learning.py | adamurban98/hottbox | 26580018ec6d38a1b08266c04ce4408c9e276130 | [
"Apache-2.0"
] | 167 | 2018-05-07T10:31:00.000Z | 2022-02-24T19:20:31.000Z | hottbox/algorithms/classification/tests/test_ensemble_learning.py | adamurban98/hottbox | 26580018ec6d38a1b08266c04ce4408c9e276130 | [
"Apache-2.0"
] | 19 | 2018-05-10T13:26:39.000Z | 2020-01-31T12:49:27.000Z | hottbox/algorithms/classification/tests/test_ensemble_learning.py | adamurban98/hottbox | 26580018ec6d38a1b08266c04ce4408c9e276130 | [
"Apache-2.0"
] | 24 | 2018-04-02T17:16:50.000Z | 2021-12-07T06:21:40.000Z | from ..ensemble_learning import TelVI, TelVAC
class TestTelVI:
    """Placeholder test suite for the TelVI ensemble classifier.

    Only trivial stubs are active; the commented-out methods below outline
    the intended coverage (core API plus base-classifier interaction).
    """
    def test_init(self):
        # Placeholder: TelVI construction is not exercised yet.
        pass
    def test_name(self):
        # Placeholder: the ``name`` property is not exercised yet.
        pass
    #
    # def test_set_params(self):
    #     pass
    #
    # def test_get_params(self):
    #     pass
    #
    # def test_fit(self):
    #     pass
    #
    # def test_predict(self):
    #     pass
    #
    # def test_predict_proba(self):
    #     pass
    #
    # def test_score(self):
    #     pass
    #
    # # Tests for interaction with the base classifiers
    # def test_name_base_clf(self):
    #     pass
    #
    # def test_set_params_base_clf(self):
    #     pass
    #
    # def test_get_params_base_clf(self):
    #     pass
    #
    # def test_fit_base_clf(self):
    #     pass
    #
    # def test_predict_base_clf(self):
    #     pass
    #
    # def test_predict_proba_base_clf(self):
    #     pass
    #
    # def test_score_base_clf(self):
    #     pass
| 17.62 | 55 | 0.572077 |
1a826d0f02df7f5f97625f90b75d0bea6687fcf9 | 4,613 | py | Python | ansible_task_worker/worker_fsm.py | benthomasson/ansible-task-worker | 33189b503e010df93adf486fde8c0eec9c436e18 | [
"Apache-2.0"
] | null | null | null | ansible_task_worker/worker_fsm.py | benthomasson/ansible-task-worker | 33189b503e010df93adf486fde8c0eec9c436e18 | [
"Apache-2.0"
] | 10 | 2020-01-05T19:08:49.000Z | 2021-11-15T17:47:59.000Z | ansible_task_worker/worker_fsm.py | benthomasson/ansible-task-worker | 33189b503e010df93adf486fde8c0eec9c436e18 | [
"Apache-2.0"
] | null | null | null | from gevent_fsm.fsm import State, transitions
from queue import Empty
from . import messages
class _RunTask(State):
    """A task is executing; wait for completion or a shutdown request."""
    @transitions('ShuttingDown')
    def onShutdownRequested(self, controller, message_type, message):
        controller.changeState(ShuttingDown)
    @transitions('TaskComplete')
    def onStatus(self, controller, message_type, message):
        # NOTE(review): a status event also ends the task, but unlike
        # onTaskComplete it emits no TaskComplete message -- confirm intended.
        controller.changeState(TaskComplete)
    @transitions('TaskComplete')
    def onTaskComplete(self, controller, message_type, message):
        task_id = controller.context.task_id
        client_id = controller.context.client_id
        controller.changeState(TaskComplete)
        # Tell the client which task just finished.
        controller.outboxes['output'].put(messages.TaskComplete(task_id, client_id))
    @transitions('Restart')
    def onPlaybookFinished(self, controller, message_type, message):
        controller.changeState(Restart)
RunTask = _RunTask()
class _TaskComplete(State):
    """Transient state entered after a task finishes; returns to Ready."""
    @transitions('ShuttingDown')
    def onShutdownRequested(self, controller, message_type, message):
        controller.changeState(ShuttingDown)
    @transitions('Ready')
    def start(self, controller):
        controller.changeState(Ready)
TaskComplete = _TaskComplete()
class _Initialize(State):
    """Bootstrap state that immediately moves the worker to Ready."""
    @transitions('Ready')
    def start(self, controller):
        controller.changeState(Ready)
Initialize = _Initialize()
class _Start(State):
    """Initial FSM state; immediately moves to Waiting."""
    @transitions('Waiting')
    def start(self, controller):
        controller.changeState(Waiting)
Start = _Start()
class _Waiting(State):
    """Waiting for the playbook before the worker can accept tasks."""
    @transitions('Initialize')
    def onTaskComplete(self, controller, message_type, message):
        controller.changeState(Initialize)
    @transitions('End')
    def onPlaybookFinished(self, controller, message_type, message):
        controller.changeState(End)
Waiting = _Waiting()
class _ShuttingDown(State):
    """Shutdown was requested; cancel work and wait for it to wind down."""
    def start(self, controller):
        # Signal the running task/playbook that cancellation was requested.
        controller.context.cancel_requested = True
    @transitions('End')
    def onTaskComplete(self, controller, message_type, message):
        controller.changeState(End)
    @transitions('End')
    def onPlaybookFinished(self, controller, message_type, message):
        controller.changeState(End)
ShuttingDown = _ShuttingDown()
class _End(State):
    """Terminal state: every event re-announces that shutdown is complete.

    The original four handlers were byte-identical copies; the shared body
    now lives in a single private helper.
    """
    def _notify_shutdown_complete(self, controller):
        # Repeat the announcement for any late event so callers that missed
        # the first ShutdownComplete still learn the worker has stopped.
        task_id = controller.context.task_id
        client_id = controller.context.client_id
        controller.outboxes['output'].put(messages.ShutdownComplete(task_id, client_id))
    def start(self, controller):
        self._notify_shutdown_complete(controller)
    def onShutdownRequested(self, controller, message_type, message):
        self._notify_shutdown_complete(controller)
    def onTaskComplete(self, controller, message_type, message):
        self._notify_shutdown_complete(controller)
    def onPlaybookFinished(self, controller, message_type, message):
        self._notify_shutdown_complete(controller)
End = _End()
class _Ready(State):
    """Idle state: flushes buffered messages and dispatches incoming tasks."""
    @transitions('ShuttingDown')
    def onShutdownRequested(self, controller, message_type, message):
        controller.changeState(ShuttingDown)
    def start(self, controller):
        # Drain every message buffered while the worker was busy back onto
        # the live queue so it gets processed in this state.
        try:
            while True:
                message = controller.context.buffered_messages.get_nowait()
                controller.context.queue.put(message)
        except Empty:
            pass
    def onInventory(self, controller, message_type, message):
        # Inventory updates are ignored while idle.
        pass
    @transitions('RunTask')
    def onTask(self, controller, message_type, message):
        controller.changeState(RunTask)
        # Remember who asked for what before kicking off the task.
        controller.context.task_id = message.id
        controller.context.client_id = message.client_id
        controller.context.run_task(message)
    @transitions('Restart')
    def onPlaybookFinished(self, controller, message_type, message):
        controller.changeState(Restart)
Ready = _Ready()
class _Restart(State):
    """Tears down and restarts the play, then replays the in-flight task."""
    @transitions('Waiting')
    def start(self, controller):
        # Recycle the pause/status handlers before starting a fresh play.
        controller.context.stop_pause_handler()
        controller.context.stop_status_handler()
        controller.context.start_pause_handler()
        controller.context.start_status_handler()
        controller.context.start_play()
        controller.changeState(Waiting)
        # Re-enqueue the current task so the new play picks it up.
        controller.context.queue.put(controller.context.current_task)
Restart = _Restart()
| 26.36 | 88 | 0.708866 |
2038deec948cf9fa8b0c2f612bea3e4f97c9f343 | 4,740 | py | Python | userbot/plugins/filters.py | Blank-sama/X-tra-Telegram | 549317bd99a6e4864114a9e6d3aea899984cf437 | [
"Apache-2.0"
] | null | null | null | userbot/plugins/filters.py | Blank-sama/X-tra-Telegram | 549317bd99a6e4864114a9e6d3aea899984cf437 | [
"Apache-2.0"
] | null | null | null | userbot/plugins/filters.py | Blank-sama/X-tra-Telegram | 549317bd99a6e4864114a9e6d3aea899984cf437 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Filters
Available Commands:
.savefilter
.listfilters
.clearfilter"""
import asyncio
import re
from telethon import events, utils
from telethon.tl import types
from Bonten.plugins.sql_helper.filter_sql import get_filter, add_filter, remove_filter, get_all_filters, remove_all_filters
# Seconds a triggered text stays suppressed before it may trigger again
# (0 = no cool-down).
DELETE_TIMEOUT = 0
# Discriminators for the kind of media stored with a saved filter.
TYPE_TEXT = 0
TYPE_PHOTO = 1
TYPE_DOCUMENT = 2
# NOTE(review): ``global`` at module level is a no-op statement.
global last_triggered_filters
# chat_id -> list of message texts that recently triggered a filter; used
# by on_snip as a spam guard so the same text is not answered repeatedly.
last_triggered_filters = {} # pylint:disable=E0602
@command(incoming=True)
async def on_snip(event):
    """Reply with the saved filter whenever an incoming message matches."""
    global last_triggered_filters
    name = event.raw_text
    if event.chat_id in last_triggered_filters:
        if name in last_triggered_filters[event.chat_id]:
            # avoid userbot spam
            # "I demand rights for us bots, we are equal to you humans." -Henri Koivuneva (t.me/UserbotTesting/2698)
            return False
    snips = get_all_filters(event.chat_id)
    if snips:
        for snip in snips:
            # Match the keyword as a whole word, case-insensitively.
            pattern = r"( |^|[^\w])" + re.escape(snip.keyword) + r"( |$|[^\w])"
            if re.search(pattern, name, flags=re.IGNORECASE):
                # Rebuild the Telegram media handle stored with the filter.
                if snip.snip_type == TYPE_PHOTO:
                    media = types.InputPhoto(
                        int(snip.media_id),
                        int(snip.media_access_hash),
                        snip.media_file_reference
                    )
                elif snip.snip_type == TYPE_DOCUMENT:
                    media = types.InputDocument(
                        int(snip.media_id),
                        int(snip.media_access_hash),
                        snip.media_file_reference
                    )
                else:
                    media = None
                # NOTE(review): message_id is computed but never used below.
                message_id = event.message.id
                if event.reply_to_msg_id:
                    message_id = event.reply_to_msg_id
                await event.reply(
                    snip.reply,
                    file=media
                )
                # Suppress this exact text for DELETE_TIMEOUT seconds.
                if event.chat_id not in last_triggered_filters:
                    last_triggered_filters[event.chat_id] = []
                last_triggered_filters[event.chat_id].append(name)
                await asyncio.sleep(DELETE_TIMEOUT)
                last_triggered_filters[event.chat_id].remove(name)
@command(pattern="^.savefilter (.*)")
async def on_snip_save(event):
    """Save the replied-to message as the filter for the given keyword."""
    name = event.pattern_match.group(1)
    msg = await event.get_reply_message()
    if msg:
        snip = {'type': TYPE_TEXT, 'text': msg.message or ''}
        if msg.media:
            media = None
            if isinstance(msg.media, types.MessageMediaPhoto):
                media = utils.get_input_photo(msg.media.photo)
                snip['type'] = TYPE_PHOTO
            elif isinstance(msg.media, types.MessageMediaDocument):
                media = utils.get_input_document(msg.media.document)
                snip['type'] = TYPE_DOCUMENT
            if media:
                # Persist the pieces needed to rebuild the media later.
                snip['id'] = media.id
                snip['hash'] = media.access_hash
                snip['fr'] = media.file_reference
        add_filter(event.chat_id, name, snip['text'], snip['type'], snip.get('id'), snip.get('hash'), snip.get('fr'))
        await event.edit(f"filter {name} saved successfully. Get it with {name}")
    else:
        await event.edit("Reply to a message with `savefilter keyword` to save the filter")
@command(pattern="^.listfilters$")
async def on_snip_list(event):
    """List every filter keyword saved for the current chat.

    If the list would exceed Telegram's 4096-character message limit it is
    sent as a text file instead of an edited message.
    """
    # Bug fix: ``io`` was used below but never imported anywhere in this
    # module, so the long-list branch raised NameError.
    import io
    all_snips = get_all_filters(event.chat_id)
    OUT_STR = "Available Filters in the Current Chat:\n"
    if len(all_snips) > 0:
        for a_snip in all_snips:
            OUT_STR += f"👉 {a_snip.keyword} \n"
    else:
        OUT_STR = "No Filters. Start Saving using `.savefilter`"
    if len(OUT_STR) > 4096:
        with io.BytesIO(str.encode(OUT_STR)) as out_file:
            out_file.name = "filters.text"
            # NOTE(review): ``bot`` is assumed to be injected into the plugin
            # namespace by the userbot loader; it is not imported here.
            await bot.send_file(
                event.chat_id,
                out_file,
                force_document=True,
                allow_cache=False,
                caption="Available Filters in the Current Chat",
                reply_to=event
            )
        await event.delete()
    else:
        await event.edit(OUT_STR)
@command(pattern="^.clearfilter (.*)")
async def on_snip_delete(event):
    """Delete the filter whose keyword follows ``.clearfilter``."""
    keyword = event.pattern_match.group(1)
    remove_filter(event.chat_id, keyword)
    await event.edit(f"filter {keyword} deleted successfully")
@command(pattern="^.clearallfilters$")
async def on_all_snip_delete(event):
    """Wipe every filter saved for the current chat."""
    remove_all_filters(event.chat_id)
    # Plain literal: the original used an f-string with no placeholders
    # (flake8 F541); the emitted text is unchanged.
    await event.edit("filters **in current chat** deleted successfully")
| 37.03125 | 123 | 0.598945 |
cd5528d745ef8d0fe8d6a7434586d76ec4953918 | 4,595 | py | Python | qa/rpc-tests/multi_rpc.py | GINcoinl/gincoin-core | 4e45bbea153bb02f9964c863aea33ebb707e260c | [
"MIT"
] | 23 | 2018-12-18T16:54:16.000Z | 2021-05-06T19:34:50.000Z | qa/rpc-tests/multi_rpc.py | GINcoinl/gincoin-core | 4e45bbea153bb02f9964c863aea33ebb707e260c | [
"MIT"
] | 3 | 2019-01-26T08:41:46.000Z | 2020-05-25T13:02:11.000Z | qa/rpc-tests/multi_rpc.py | GINcoinl/gincoin-core | 4e45bbea153bb02f9964c863aea33ebb707e260c | [
"MIT"
] | 15 | 2018-12-28T19:09:37.000Z | 2020-06-13T18:19:10.000Z | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test mulitple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
    """Test the multi-user ``rpcauth`` RPC credential config option.

    Two extra RPC users (``rt`` and ``rt2``) are appended to gincoin.conf in
    setup_chain; run_test then checks which username/password pairs the node
    accepts.  The six copy-pasted request sequences of the original are
    collapsed into one helper plus a case table.
    """

    def setup_nodes(self):
        return start_nodes(4, self.options.tmpdir)

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain(self.options.tmpdir)
        # Append rpcauth entries (generated via the share/rpcuser tool) to
        # gincoin.conf before the node starts.
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        with open(os.path.join(self.options.tmpdir+"/node0", "gincoin.conf"), 'a') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")

    def _request_is_unauthorized(self, url, authpair):
        """POST a getbestblockhash RPC using *authpair* ("user:password") and
        return True when the node rejects it with HTTP 401."""
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        unauthorized = resp.status == 401
        # Close the connection so each case uses a fresh socket.
        conn.close()
        return unauthorized

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urlparse.urlparse(self.nodes[0].url)

        # Old (conf/cookie) authpair plus the cleartext passwords matching
        # the two rpcauth hashes written in setup_chain.
        authpair = url.username + ':' + url.password
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="

        # (credentials, expect_rejected)
        cases = [
            (authpair, False),                     # original credentials
            ("rt:" + password, False),             # first rpcauth user
            ("rtwrong:" + password, True),         # wrong username for rt
            ("rt:" + password + "wrong", True),    # wrong password for rt
            ("rt2:" + password2, False),           # second rpcauth user
            ("rt2:" + password2 + "wrong", True),  # wrong password for rt2
        ]
        for credentials, expect_rejected in cases:
            assert_equal(
                self._request_is_unauthorized(url, credentials), expect_rejected
            )

if __name__ == '__main__':
    HTTPBasicsTest ().main ()
| 37.663934 | 129 | 0.649619 |
adb5f7de0d1ba289efcc988442c49d295ca3e9d3 | 5,580 | py | Python | configs/swin/self_t_config_100_000025.py | Daeil-Jung/swin_food_detection | 76d6273f01a3695f412a915f70092afe15178e64 | [
"Apache-2.0"
] | null | null | null | configs/swin/self_t_config_100_000025.py | Daeil-Jung/swin_food_detection | 76d6273f01a3695f412a915f70092afe15178e64 | [
"Apache-2.0"
] | null | null | null | configs/swin/self_t_config_100_000025.py | Daeil-Jung/swin_food_detection | 76d6273f01a3695f412a915f70092afe15178e64 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# MMDetection config: Faster R-CNN with a Swin-Tiny backbone, trained on the
# UECFOOD-100 food-detection dataset (100 classes, COCO-format annotations).
# Inherits the base detector/dataset/schedule/runtime configs via `_base_`.
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
data_root = '/swin_food_detection/data/uecfood100_coco'
# The 100 UECFOOD class names; order must stay aligned with the category ids
# in the COCO annotation files (note: some names keep their original trailing
# spaces / typos, e.g. 'salmon meuniere ' — they are dataset keys, do not edit).
classes = ('rice', 'eels on rice', 'pilaf', "chicken-'n'-egg on rice", 'pork cutlet on rice', 'beef curry', 'sushi',
           'chicken rice', 'fried rice', 'tempura bowl', 'bibimbap', 'toast', 'croissant', 'roll bread', 'raisin bread',
           'chip butty', 'hamburger', 'pizza', 'sandwiches', 'udon noodle', 'tempura udon', 'soba noodle',
           'ramen noodle', 'beef noodle', 'tensin noodle', 'fried noodle', 'spaghetti', 'Japanese-style pancake',
           'takoyaki', 'gratin', 'sauteed vegetables', 'croquette', 'grilled eggplant', 'sauteed spinach',
           'vegetable tempura', 'miso soup', 'potage', 'sausage', 'oden', 'omelet', 'ganmodoki', 'jiaozi', 'stew',
           'teriyaki grilled fish', 'fried fish', 'grilled salmon', 'salmon meuniere ', 'sashimi',
           'grilled pacific saury ', 'sukiyaki', 'sweet and sour pork', 'lightly roasted fish',
           'steamed egg hotchpotch', 'tempura', 'fried chicken', 'sirloin cutlet ', 'nanbanzuke', 'boiled fish',
           'seasoned beef with potatoes', 'hambarg steak', 'beef steak', 'dried fish', 'ginger pork saute',
           'spicy chili-flavored tofu', 'yakitori', 'cabbage roll', 'rolled omelet', 'egg sunny-side up',
           'fermented soybeans', 'cold tofu', 'egg roll', 'chilled noodle', 'stir-fried beef and peppers',
           'simmered pork', 'boiled chicken and vegetables', 'sashimi bowl', 'sushi bowl',
           'fish-shaped pancake with bean jam', 'shrimp with chill source', 'roast chicken', 'steamed meat dumpling',
           'omelet with fried rice', 'cutlet curry', 'spaghetti meat sauce', 'fried shrimp', 'potato salad',
           'green salad', 'macaroni salad', 'Japanese tofu and vegetable chowder', 'pork miso soup', 'chinese soup',
           'beef bowl', 'kinpira-style sauteed burdock', 'rice ball', 'pizza toast', 'dipping noodles', 'hot dog',
           'french fries', 'mixed rice', 'goya chanpuru')
# Replace the base ResNet-50 backbone with Swin-Tiny (ImageNet-pretrained);
# `_delete_=True` discards the inherited backbone dict entirely.
model = dict(
    type='FasterRCNN',
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],            # Swin-T stage depths
        num_heads=[3, 6, 12, 24],       # attention heads per stage
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,             # stochastic depth
        patch_norm=True,
        out_indices=(0, 1, 2, 3),       # feed all four stages to FPN
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    # FPN input channels must match the Swin-T stage widths (96 * 2^i)
    neck=dict(in_channels=[96, 192, 384, 768]),
    roi_head=dict(bbox_head=dict(num_classes=100)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    # AutoAugment picks one of two policies per image:
    # (1) plain multi-scale resize, or (2) resize + random crop + resize.
    dict(
        type='AutoAugment',
        policies=[[
            dict(
                type='Resize',
                img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                           (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                           (736, 1333), (768, 1333), (800, 1333)],
                multiscale_mode='value',
                keep_ratio=True)
        ],
                  [
                      dict(
                          type='Resize',
                          img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                          multiscale_mode='value',
                          keep_ratio=True),
                      dict(
                          type='RandomCrop',
                          crop_type='absolute_range',
                          crop_size=(384, 600),
                          allow_negative_crop=True),
                      dict(
                          type='Resize',
                          img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                     (576, 1333), (608, 1333), (640, 1333),
                                     (672, 1333), (704, 1333), (736, 1333),
                                     (768, 1333), (800, 1333)],
                          multiscale_mode='value',
                          override=True,
                          keep_ratio=True)
                  ]]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Dataset splits; val/test keep the pipeline inherited from the base config.
data = dict(
    train=dict(
        img_prefix=data_root,
        classes=classes,
        ann_file=data_root + "/annotations/train_anno.json",
        pipeline=train_pipeline),
    val=dict(
        img_prefix=data_root,
        classes=classes,
        ann_file=data_root + "/annotations/valid_anno.json"),
    test=dict(
        img_prefix=data_root,
        classes=classes,
        ann_file=data_root + "/annotations/test_anno.json"))
# AdamW with a very small LR (2.5e-6, also encoded in the filename) and no
# weight decay on positional tables / norm layers, as in the Swin paper.
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0000025,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
lr_config = dict(warmup_iters=1000, step=[27, 33])  # decay at epochs 27 and 33
runner = dict(max_epochs=36)
| 43.59375 | 123 | 0.5681 |
5f5e93ba241675f79867f525d36b46ce3f37977e | 8,231 | py | Python | demo_SMGP_toy.py | LiuHaiTao01/ModulatedGPs | bceac3aff0c40b2235edd72d31f49a523db0d4d0 | [
"Apache-2.0"
] | 1 | 2022-02-19T13:02:12.000Z | 2022-02-19T13:02:12.000Z | demo_SMGP_toy.py | LiuHaiTao01/ModulatedGPs | bceac3aff0c40b2235edd72d31f49a523db0d4d0 | [
"Apache-2.0"
] | null | null | null | demo_SMGP_toy.py | LiuHaiTao01/ModulatedGPs | bceac3aff0c40b2235edd72d31f49a523db0d4d0 | [
"Apache-2.0"
] | 1 | 2020-09-01T03:56:44.000Z | 2020-09-01T03:56:44.000Z | import os
# Demo: fit a Stochastic Mixture of Gaussian Processes (SMGP) on a 1-D toy
# dataset, then plot predictive samples, the ELBO trace, the learned expert
# assignments, and each expert's predictive band.
# Silence TensorFlow C++ logging before TF is imported.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)  # TF1-style API throughout
import numpy as np
from scipy.cluster.vq import kmeans2, kmeans
from ModulatedGPs.likelihoods import Gaussian
from ModulatedGPs.models import SMGP
from ModulatedGPs.layers import SVGP_Layer
from ModulatedGPs.kernels import RBF
from gpflow import settings
float_type = settings.float_type
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
# %matplotlib inline
import matplotlib.colors as mcolors
colors = [mcolors.TABLEAU_COLORS[key] for key in mcolors.TABLEAU_COLORS.keys()]
# Fix all RNG seeds for reproducibility.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
#***************************************
# Load data
#***************************************
func = 'step'  # one of: 'hetero', 'step', 'moon', 'dataAssoc'
if func == 'hetero':
    # Heteroscedastic regression: smooth mean f with input-dependent noise g.
    f = lambda X: np.cos(5*X)*np.exp(-X/2)
    g = lambda X: 0.25*(np.cos(6*X)+1)*np.exp(-X)
    N, Ns = 1000, 500
    Xtrain = np.linspace(-2,2,N)[:,None]
    Ytrain = f(Xtrain) + g(Xtrain)*np.random.normal(size=Xtrain.shape)
    Xtest = np.linspace(-2,2,Ns)[:,None]
    Ytest = f(Xtest) + g(Xtest)*np.random.normal(size=Xtest.shape)
elif func == 'step':
    # Step function with small homoscedastic noise (discontinuity at x=0.5).
    N, Ns = 500, 500
    Xtrain = np.linspace(0., 1., N)[:, None]
    Xtest = np.linspace(0., 1., Ns)[:, None]
    f_step = lambda x: 0. if x<0.5 else 1.
    g_step = lambda x: 1e-2
    Ytrain = np.reshape([f_step(x) + np.random.randn() * g_step(x) for x in Xtrain], Xtrain.shape)
    Ytest = np.reshape([f_step(x) + np.random.randn() * g_step(x) for x in Xtest], Xtest.shape)
elif func == 'moon':
    # Two-moons: a multi-valued "function", needs a mixture to fit.
    N, Ns = 200, 500
    noise = 5.0e-2
    from sklearn.datasets import make_moons
    data, _ = make_moons(n_samples=N, shuffle=True, noise=noise)
    Xtrain, Ytrain = data[:, 0].reshape(-1, 1), data[:, 1].reshape(-1, 1)
    data_test, _ = make_moons(n_samples=Ns, shuffle=True, noise=noise)
    Xtest = np.sort(data_test[:, 0].reshape(-1, 1))
elif func == 'dataAssoc': # 2019-Data association with Gaussian process
    # Signal corrupted by uniform outliers with probability lambda_.
    N, Ns, lambda_ = 1000, 500, .4
    delta = np.random.binomial(1,lambda_,size=(N,1))
    noise = np.random.randn(N,1) * .15
    epsilon = np.random.uniform(low=-1., high=3., size=(N, 1))
    Xtrain = np.random.uniform(low=-3., high=3., size=(N, 1))
    Ytrain = (1.-delta) * (np.cos(.5*np.pi*Xtrain) * np.exp(-.25*Xtrain**2) + noise) + delta * epsilon
    Xtest = np.linspace(-3, 3, Ns)[:, None]
# normalization (z-score both inputs and targets using training statistics)
Ymean, Ystd = np.mean(Ytrain), np.std(Ytrain)
Ytrain_norm = (Ytrain - Ymean) / Ystd
Xmean, Xstd = np.mean(Xtrain, axis=0, keepdims=True), np.std(Xtrain, axis=0, keepdims=True)
Xtrain_norm = (Xtrain - Xmean) / Xstd
Xtest_norm = (Xtest - Xmean) / Xstd
#***************************************
# Model configuration
#***************************************
num_iter = 10000                    # Optimization iterations
lr = 5e-3                           # Learning rate for Adam opt
num_minibatch = N                   # Batch size for stochastic opt (full batch here)
num_samples = 10                    # Number of MC samples
num_predict_samples = 200           # Number of prediction samples
num_data = Xtrain.shape[0]          # Training size
dimX = Xtrain.shape[1]              # Input dimensions
dimY = 1                            # Output dimensions
num_ind = 50                        # Inducing size for f
# TF1 feedable-iterator input pipeline: shuffle + batch + repeat forever.
X_placeholder = tf.placeholder(dtype = float_type,shape=[None, dimX])
Y_placeholder = tf.placeholder(dtype = float_type,shape=[None, dimY])
train_dataset = tf.data.Dataset.from_tensor_slices((X_placeholder,Y_placeholder))
train_dataset = train_dataset.shuffle(buffer_size=num_data, seed=seed).batch(num_minibatch).repeat()
train_iterator = train_dataset.make_initializable_iterator()
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
X,Y = iterator.get_next()
m_GP = 'SMGP'
K = 4  # number of GP experts in the mixture
# kernel and inducing points initialization
class KERNEL:
    # Plain namespace holding shared RBF kernel hyper-parameter defaults.
    kern = RBF
    lengthscales = 1.
    sf2 = 1.
    ARD = True
# NOTE(review): indentation was lost in this copy; `input_dim` is read at
# module scope below, so it is reconstructed as a module-level binding.
input_dim = dimX
pred_kernel = KERNEL.kern(input_dim=input_dim, lengthscales=KERNEL.lengthscales, variance=KERNEL.sf2, ARD=KERNEL.ARD, name="kernel")
assign_kernel = KERNEL.kern(input_dim=input_dim, lengthscales=KERNEL.lengthscales, variance=KERNEL.sf2, ARD=KERNEL.ARD, name="kernel_alpha")
# k-means initialization of inducing inputs for both the predictive GPs and
# the assignment GP.
Z, Z_assign = kmeans(Xtrain_norm,num_ind)[0], kmeans(Xtrain_norm,num_ind)[0]
pred_layer = SVGP_Layer(kern=pred_kernel, Z=Z, num_inducing=num_ind, num_outputs=K)
assign_layer = SVGP_Layer(kern=assign_kernel, Z=Z_assign, num_inducing=num_ind, num_outputs=K)
# model definition
lik = Gaussian(D=K)
model = SMGP(likelihood=lik, pred_layer=pred_layer, assign_layer=assign_layer,
             K=K, num_samples=num_samples, num_data=num_data)
#***************************************
# Model training
#***************************************)
lowerbound = model._build_likelihood(X,Y)
learning_rate = lr
# Adam maximizes the ELBO by minimizing its negative.
train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(-1.*lowerbound)
# prediction ops
samples_y, samples_f = model.predict_samples(X, S=num_predict_samples)
assign = model.predict_assign(X)
fmean, fvar = model.predict_y(X)
# tensorflow variable and handle initializations
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
train_handle = sess.run(train_iterator.string_handle())
sess.run(train_iterator.initializer,{X_placeholder:Xtrain_norm, Y_placeholder:Ytrain_norm})
print('{:>5s}'.format("iter") + '{:>24s}'.format("ELBO:"))
iters = []; elbos = []
for i in range(1,num_iter+1):
    try:
        sess.run(train_op,feed_dict={handle:train_handle})
        # print every 100 iterations
        if i % 100 == 0 or i == 0:  # NOTE(review): i starts at 1, so i == 0 never fires
            elbo = sess.run(lowerbound,{handle:train_handle})
            print('{:>5d}'.format(i) + '{:>24.6f}'.format(elbo) )
            iters.append(i); elbos.append(elbo)
    except KeyboardInterrupt as e:
        # Allow Ctrl-C to stop training early and fall through to plotting.
        print("stopping training")
        break
#***************************************
# Prediction and Plot
#***************************************
# Predict in batches of ~500 test points to bound memory.
n_batches = max(int(Xtest_norm.shape[0]/500), 1)
Ss_y, Ss_f = [], []
for X_batch in np.array_split(Xtest_norm, n_batches):
    Ss_y.append(sess.run(samples_y,{X:X_batch}))
    Ss_f.append(sess.run(samples_f,{X:X_batch}))
samples_y, samples_f = np.hstack(Ss_y), np.hstack(Ss_f)
mu_avg, fmu_avg = np.mean(samples_y, 0), np.mean(samples_f, 0)
# Flatten the S x Ns sample grid for scatter plotting.
samples_y_stack = np.reshape(samples_y, (num_predict_samples*Xtest_norm.shape[0],-1))
samples_f_stack = np.reshape(samples_f, (num_predict_samples*Xtest_norm.shape[0],-1))
# samples = samples * Ystd + Ymean
f, ax = plt.subplots(2, 2, figsize=(14,8))
Xt_tiled = np.tile(Xtest_norm, [num_predict_samples, 1])
# Top-left: predictive samples of y (red) and f (blue) over the training data.
ax[0,0].scatter(Xt_tiled.flatten(), samples_y_stack.flatten(), marker='+', alpha=0.01, color=mcolors.TABLEAU_COLORS['tab:red'])
ax[0,0].scatter(Xt_tiled.flatten(), samples_f_stack.flatten(), marker='+', alpha=0.01, color=mcolors.TABLEAU_COLORS['tab:blue'])
ax[0,0].scatter(Xtrain_norm, Ytrain_norm, marker='x', color='black', alpha=0.1)
ax[0,0].set_title(m_GP)
ax[0,0].set_xlabel('x')
ax[0,0].set_ylabel('y')
ax[0,0].set_ylim(1.2*min(Ytrain_norm), 1.2*max(Ytrain_norm))
ax[0,0].grid()
# Top-right: ELBO optimization trace.
ax[0,1].plot(iters, elbos, 'o-', ms=8, alpha=0.5)
ax[0,1].set_xlabel('Iterations')
ax[0,1].set_ylabel('ELBO')
ax[0,1].grid()
# Bottom-left: softmax assignment probabilities of each training point.
assign_ = sess.run(assign,{X:Xtrain_norm})
ax[1,0].plot(Xtrain_norm, assign_, 'o')
ax[1,0].set_xlabel('x')
ax[1,0].set_ylabel('softmax(assignment)')
ax[1,0].grid()
# Bottom-right: per-expert predictive mean with a +/- 2 sigma band.
fmean_, fvar_ = np.mean(sess.run(fmean,{X:Xtest_norm}),0), np.mean(sess.run(fvar,{X:Xtest_norm}),0)
lb, ub = (fmean_ - 2*fvar_**0.5), (fmean_ + 2*fvar_**0.5)
I = np.argmax(assign_, 1)  # NOTE(review): hard assignments, currently unused
for i in range(K):
    ax[1,1].plot(Xtest_norm.flatten(), fmean_[:,i], '-', alpha=1., color=colors[i])
    ax[1,1].fill_between(Xtest_norm.flatten(), lb[:,i], ub[:,i], alpha=0.3, color=colors[i])
ax[1,1].scatter(Xtrain_norm, Ytrain_norm, marker='x', color='black', alpha=0.5)
ax[1,1].set_xlabel('x')
ax[1,1].set_ylabel('Pred. of GP experts')
ax[1,1].grid()
plt.tight_layout()
plt.savefig('figs/'+m_GP+'_'+func+'_toy.png')
plt.show()
5a40cb19f19431e00166e463c20d2266ba4069c5 | 112,648 | py | Python | python/ccxt/base/exchange.py | MRGLabs/ccxt | 9551c31b59a0a4f3bd59e5a8c79aeef7ddb7cf30 | [
"MIT"
] | null | null | null | python/ccxt/base/exchange.py | MRGLabs/ccxt | 9551c31b59a0a4f3bd59e5a8c79aeef7ddb7cf30 | [
"MIT"
] | null | null | null | python/ccxt/base/exchange.py | MRGLabs/ccxt | 9551c31b59a0a4f3bd59e5a8c79aeef7ddb7cf30 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.70.86'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import RateLimitExceeded
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, NO_PADDING, TRUNCATE, ROUND, ROUND_UP, ROUND_DOWN
from ccxt.base.decimal_to_precision import number_to_string
from ccxt.base.precise import Precise
# -----------------------------------------------------------------------------
# rsa jwt signing
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# -----------------------------------------------------------------------------
# ecdsa signing
from ccxt.static_dependencies import ecdsa
from ccxt.static_dependencies import keccak
# eddsa signing
try:
import axolotl_curve25519 as eddsa
except ImportError:
eddsa = None
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
import random
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException, ConnectionError as requestsConnectionError
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
from time import mktime
from wsgiref.handlers import format_date_time
# -----------------------------------------------------------------------------
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
try:
long # long integer was removed in Python 3
except NameError:
long = int
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
name = None
version = None
certified = False # if certified by the CCXT dev team
pro = False # if it is integrated with CCXT Pro for WebSocket support
alias = False # whether this exchange is an alias to another exchange
# rate limiter settings
enableRateLimit = True
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None # Session () by default
verify = True # SSL verification
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
codes = None
timeframes = None
fees = {
'trading': {
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
urls = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
hostname = None # in case of inaccessibility of the "main" domain
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = '' # a "0x"-prefixed hexstring private key for a wallet
walletAddress = '' # the wallet address "0x"-prefixed hexstring
token = '' # reserved for HTTP auth in some cases
twofa = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': RateLimitExceeded,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'410': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
myTrades = None
trades = None
transactions = None
ohlcvs = None
tickers = None
base_currencies = None
quote_currencies = None
currencies = None
options = None # Python does not allow to define properties in run-time with setattr
accounts = None
positions = None
status = {
'status': 'ok',
'updated': None,
'eta': None,
'url': None,
}
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet
'walletAddress': False, # the wallet address "0x"-prefixed hexstring
'token': False, # reserved for HTTP auth in some cases
}
# API method metainfo
has = {
'publicAPI': True,
'privateAPI': True,
'CORS': None,
'spot': None,
'margin': None,
'swap': None,
'future': None,
'option': None,
'addMargin': None,
'cancelAllOrders': None,
'cancelOrder': True,
'cancelOrders': None,
'createDepositAddress': None,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': None,
'editOrder': 'emulated',
'fetchAccounts': None,
'fetchBalance': True,
'fetchBidsAsks': None,
'fetchBorrowRate': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRatesPerSymbol': None,
'fetchBorrowRates': None,
'fetchCanceledOrders': None,
'fetchClosedOrder': None,
'fetchClosedOrders': None,
'fetchCurrencies': 'emulated',
'fetchDeposit': None,
'fetchDepositAddress': None,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': None,
'fetchDeposits': None,
'fetchFundingFee': None,
'fetchFundingFees': None,
'fetchFundingHistory': None,
'fetchFundingRate': None,
'fetchFundingRateHistory': None,
'fetchFundingRates': None,
'fetchIndexOHLCV': None,
'fetchL2OrderBook': True,
'fetchLedger': None,
'fetchLedgerEntry': None,
'fetchMarkets': True,
'fetchMarkOHLCV': None,
'fetchMyTrades': None,
'fetchOHLCV': 'emulated',
'fetchOpenOrder': None,
'fetchOpenOrders': None,
'fetchOrder': None,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': None,
'fetchOrderTrades': None,
'fetchPosition': None,
'fetchPositions': None,
'fetchPositionsRisk': None,
'fetchPremiumIndexOHLCV': None,
'fetchStatus': 'emulated',
'fetchTicker': True,
'fetchTickers': None,
'fetchTime': None,
'fetchTrades': True,
'fetchTradingFee': None,
'fetchTradingFees': None,
'fetchTradingLimits': None,
'fetchTransactions': None,
'fetchTransfers': None,
'fetchWithdrawal': None,
'fetchWithdrawals': None,
'loadLeverageBrackets': None,
'loadMarkets': True,
'reduceMargin': None,
'setLeverage': None,
'setMarginMode': None,
'setPositionMode': None,
'signIn': None,
'transfer': None,
'withdraw': None,
}
precisionMode = DECIMAL_PLACES
paddingMode = NO_PADDING
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
quoteJsonNumbers = True
number = float # or str (a pointer to a class)
handleContentTypeApplicationZip = False
# whether fees should be summed by currency code
reduceFees = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresEddsa = False
base58_encoder = None
base58_decoder = None
# no lower case l or upper case I, O
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
synchronous = True
def __init__(self, config={}):
    """Build the exchange instance.

    Merges ``config`` over the subclass ``describe()`` dict, generates
    camelCase aliases for all snake_case members, configures the rate-limit
    token bucket and creates the HTTP session and logger.

    NOTE(review): the mutable default ``config={}`` is shared across calls;
    it appears to be only read here, but confirm no caller mutates it.
    """
    # Replace class-level ``None`` placeholders with per-instance dicts so
    # mutable state is never shared between exchange instances.
    self.precision = dict() if self.precision is None else self.precision
    self.limits = dict() if self.limits is None else self.limits
    self.exceptions = dict() if self.exceptions is None else self.exceptions
    self.headers = dict() if self.headers is None else self.headers
    self.balance = dict() if self.balance is None else self.balance
    self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
    self.tickers = dict() if self.tickers is None else self.tickers
    self.trades = dict() if self.trades is None else self.trades
    self.transactions = dict() if self.transactions is None else self.transactions
    self.positions = dict() if self.positions is None else self.positions
    self.ohlcvs = dict() if self.ohlcvs is None else self.ohlcvs
    self.currencies = dict() if self.currencies is None else self.currencies
    self.options = dict() if self.options is None else self.options  # Python does not allow to define properties in run-time with setattr
    # Expose the module-level helpers as instance attributes.
    self.decimal_to_precision = decimal_to_precision
    self.number_to_string = number_to_string
    # version = '.'.join(map(str, sys.version_info[:3]))
    # self.userAgent = {
    #     'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
    # }
    self.origin = self.uuid()
    self.userAgent = default_user_agent()
    # Overlay user config onto the exchange description; dict values are
    # deep-merged, everything else is overwritten.
    settings = self.deep_extend(self.describe(), config)
    for key in settings:
        if hasattr(self, key) and isinstance(getattr(self, key), dict):
            setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
        else:
            setattr(self, key, settings[key])
    if self.api:
        # Generate the implicit API methods (publicGetX, privatePostY, ...).
        self.define_rest_api(self.api, 'request')
    if self.markets:
        self.set_markets(self.markets)
    # convert all properties from underscore notation foo_bar to camelcase notation fooBar
    cls = type(self)
    for name in dir(self):
        if name[0] != '_' and name[-1] != '_' and '_' in name:
            parts = name.split('_')
            # fetch_ohlcv → fetchOHLCV (not fetchOhlcv!)
            exceptions = {'ohlcv': 'OHLCV', 'le': 'LE', 'be': 'BE'}
            camelcase = parts[0] + ''.join(exceptions.get(i, self.capitalize(i)) for i in parts[1:])
            attr = getattr(self, name)
            if isinstance(attr, types.MethodType):
                # Methods are aliased on the class, other attrs per-instance.
                setattr(cls, camelcase, getattr(cls, name))
            else:
                setattr(self, camelcase, attr)
    # Token-bucket parameters for the async rate limiter; user-supplied
    # values (if any) take precedence over the derived defaults.
    self.tokenBucket = self.extend({
        'refillRate': 1.0 / self.rateLimit if self.rateLimit > 0 else float('inf'),
        'delay': 0.001,
        'capacity': 1.0,
        'defaultCost': 1.0,
    }, getattr(self, 'tokenBucket', {}))
    # Only the synchronous flavor owns a requests.Session by default.
    self.session = self.session if self.session or not self.synchronous else Session()
    self.logger = self.logger if self.logger else logging.getLogger(__name__)
def __del__(self):
    """Finalizer: release the HTTP session's pooled sockets, if one exists."""
    session = self.session
    if session:
        session.close()
def __repr__(self):
    """Unambiguous representation, e.g. ``ccxt.binance()`` or ``ccxt.async_support.binance()``."""
    namespace = 'ccxt.async_support.' if self.asyncio_loop else 'ccxt.'
    return namespace + self.id + '()'
def __str__(self):
    """Human-readable form: the exchange's display name."""
    return self.name
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
    """Switch between production and sandbox API endpoints.

    Enabling swaps ``urls['api']`` for ``urls['test']`` (keeping a backup);
    disabling restores the backed-up production URL. Raises NotSupported if
    the exchange declares no sandbox URL.
    """
    urls = self.urls
    if enabled:
        if 'test' not in urls:
            raise NotSupported(self.id + ' does not have a sandbox URL')
        urls['apiBackup'] = urls['api']
        urls['api'] = urls['test']
        return
    if 'apiBackup' in urls:
        urls['api'] = urls['apiBackup']
        del urls['apiBackup']
def define_rest_api_endpoint(self, method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, config={}):
    """Generate and attach two method aliases (camelCase and under_score) for
    one API endpoint, both bound to the same generated closure.

    :param method_name: name of the dispatcher to wrap (usually 'request')
    :param uppercase_method/lowercase_method/camelcase_method: HTTP verb forms
    :param path: the endpoint path string as declared in ``self.api``
    :param paths: chain of ``self.api`` section keys leading to this endpoint
    :param config: per-endpoint settings (e.g. rate-limiter 'cost')
    """
    cls = type(self)
    entry = getattr(cls, method_name)  # returns a function (instead of a bound method)
    delimiters = re.compile('[^a-zA-Z0-9]')
    split_path = delimiters.split(path)
    lowercase_path = [x.strip().lower() for x in split_path]
    camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
    underscore_suffix = '_'.join([x for x in lowercase_path if len(x)])
    camelcase_prefix = ''
    underscore_prefix = ''
    if len(paths):
        camelcase_prefix = paths[0]
        underscore_prefix = paths[0]
        if len(paths) > 1:
            camelcase_prefix += ''.join([Exchange.capitalize(x) for x in paths[1:]])
            underscore_prefix += '_' + '_'.join([x.strip() for p in paths[1:] for x in delimiters.split(p)])
            api_argument = paths
        else:
            api_argument = paths[0]
    camelcase = camelcase_prefix + camelcase_method + Exchange.capitalize(camelcase_suffix)
    underscore = underscore_prefix + '_' + lowercase_method + '_' + underscore_suffix.lower()

    def partialer():
        outer_kwargs = {'path': path, 'api': api_argument, 'method': uppercase_method, 'config': config}

        @functools.wraps(entry)
        def inner(_self, params=None, context=None):
            """
            Inner is called when a generated method (publicGetX) is called.
            _self is a reference to self created by function.__get__(exchange, type(exchange))
            https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
            """
            inner_kwargs = dict(outer_kwargs)  # avoid mutation
            if params is not None:
                inner_kwargs['params'] = params
            if context is not None:
                # FIX: was ``inner_kwargs['context'] = params`` — the caller's
                # context was silently replaced by the params dict
                inner_kwargs['context'] = context
            return entry(_self, **inner_kwargs)
        return inner
    to_bind = partialer()
    setattr(cls, camelcase, to_bind)
    setattr(cls, underscore, to_bind)
def define_rest_api(self, api, method_name, paths=[]):
    """Recursively walk the ``self.api`` description tree and generate one
    implicit method per endpoint via ``define_rest_api_endpoint``.

    ``api`` values may be: a list of endpoint paths (legacy format), a dict of
    ``{endpoint: config}`` under an HTTP-verb key, or a nested section dict.
    """
    for key, value in api.items():
        uppercase_method = key.upper()
        lowercase_method = key.lower()
        camelcase_method = lowercase_method.capitalize()
        if isinstance(value, list):
            # legacy format: a flat list of endpoint paths, no per-endpoint config
            for path in value:
                self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths)
        # the options HTTP method conflicts with the 'options' API url path
        # elif re.search(r'^(?:get|post|put|delete|options|head|patch)$', key, re.IGNORECASE) is not None:
        elif re.search(r'^(?:get|post|put|delete|head|patch)$', key, re.IGNORECASE) is not None:
            for [endpoint, config] in value.items():
                path = endpoint.strip()
                if isinstance(config, dict):
                    self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, config)
                elif isinstance(config, Number):
                    # a bare number is shorthand for the rate-limiter cost
                    self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, {'cost': config})
                else:
                    raise NotSupported(self.id + ' define_rest_api() API format not supported, API leafs must strings, objects or numbers')
        else:
            # nested section (e.g. 'public' / 'private'): recurse with the key appended
            self.define_rest_api(value, method_name, paths + [key])
def throttle(self, cost=None):
    """Sleep until ``rateLimit * cost`` milliseconds have elapsed since the
    last REST request; ``cost`` defaults to 1."""
    if cost is None:
        cost = 1
    required = self.rateLimit * cost  # minimum spacing in milliseconds
    elapsed = float(self.milliseconds()) - self.lastRestRequestTimestamp
    remaining = required - elapsed
    if remaining > 0:
        time.sleep(remaining / 1000.0)
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
    """Rate-limiter weight of one request; taken from the endpoint's config
    ('cost' key), defaulting to 1. Subclasses may override for dynamic costs."""
    return self.safe_value(config, 'cost', 1)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
    """A better wrapper over request for deferred signing.

    Applies the rate limiter (when enabled), signs the request via the
    exchange-specific ``sign()``, then performs it with ``fetch()``.
    """
    if self.enableRateLimit:
        cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
        self.throttle(cost)
    self.lastRestRequestTimestamp = self.milliseconds()
    request = self.sign(path, api, method, params, headers, body)
    # FIX: removed leftover debug prints that unconditionally dumped every
    # signed request (url/method/body, which may carry signatures and keys)
    # to stdout; set self.verbose instead — fetch() logs the request then.
    return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
    """Entry point for every generated implicit API method; delegates to fetch2()."""
    return self.fetch2(path, api, method, params, headers, body, config, context)
@staticmethod
def gzip_deflate(response, text):
    """Decompress a raw HTTP body according to its Content-Encoding header;
    returns ``text`` unchanged for any other (or missing) encoding."""
    encoding = response.info().get('Content-Encoding')
    if encoding == 'deflate':
        # raw deflate stream: negative wbits disables the zlib header check
        return zlib.decompress(text, -zlib.MAX_WBITS)
    if encoding in ('gzip', 'x-gzip'):
        return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
    return text
def throw_exactly_matched_exception(self, exact, string, message):
    """Raise the exception class mapped to ``string`` in ``exact``; no-op when unmapped."""
    if string not in exact:
        return
    raise exact[string](message)
def throw_broadly_matched_exception(self, broad, string, message):
    """Raise the exception for the first key of ``broad`` found as a substring
    of ``string``; no-op when nothing matches."""
    key = self.find_broadly_matched_key(broad, string)
    if key is None:
        return
    raise broad[key](message)
def find_broadly_matched_key(self, broad, string):
    """A helper method for matching error strings exactly vs broadly.

    Returns the first key of ``broad`` (in insertion order) that occurs as a
    substring of ``string``, or None when nothing matches.
    """
    for key in broad:
        if key in string:
            return key
    return None
def prepare_request_headers(self, headers=None):
    """Merge per-call headers with the exchange-wide defaults, the configured
    User-Agent, proxy Origin and Accept-Encoding, then pass the result through
    the ``set_headers`` hook. Mutates and returns the given dict (if any)."""
    merged = headers or {}
    merged.update(self.headers)
    user_agent = self.userAgent
    if user_agent:
        if type(user_agent) is str:
            merged['User-Agent'] = user_agent
        elif (type(user_agent) is dict) and ('User-Agent' in user_agent):
            merged.update(user_agent)
    if self.proxy:
        # CORS origin header for proxied requests
        merged['Origin'] = self.origin
    merged['Accept-Encoding'] = 'gzip, deflate'
    return self.set_headers(merged)
    def log(self, *args):
        """Default logging hook: print *args* to stdout."""
        print(*args)
    def set_headers(self, headers):
        """Hook for subclasses to post-process outgoing headers; identity by default."""
        return headers
    def handle_errors(self, code, reason, url, method, headers, body, response, request_headers, request_body):
        """Hook for exchange-specific error mapping; the base implementation does nothing."""
        pass
    def on_rest_response(self, code, reason, url, method, response_headers, response_body, request_headers, request_body):
        """Hook invoked with every raw REST response body; strips surrounding whitespace."""
        return response_body.strip()
def on_json_response(self, response_body):
if self.quoteJsonNumbers:
return json.loads(response_body, parse_float=str, parse_int=str)
else:
return json.loads(response_body)
    def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data

        Transport failures are mapped onto the unified error hierarchy
        (RequestTimeout / NetworkError / ExchangeError); HTTP status and
        body-level errors are delegated to handle_errors() and
        handle_http_status_code() before a generic ExchangeError is raised.
        """
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            self.log("\nfetch Request:", self.id, method, url, "RequestHeaders:", request_headers, "RequestBody:", body)
        self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
        request_body = body
        if body:
            body = body.encode()
        self.session.cookies.clear()
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            response = self.session.request(
                method,
                url,
                data=body,
                headers=request_headers,
                timeout=int(self.timeout / 1000),
                proxies=self.proxies,
                verify=self.verify
            )
            # does not try to detect encoding
            response.encoding = 'utf-8'
            headers = response.headers
            http_status_code = response.status_code
            http_status_text = response.reason
            http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, response.text, request_headers, request_body)
            json_response = self.parse_json(http_response)
            # FIXME remove last_x_responses from subclasses
            if self.enableLastHttpResponse:
                self.last_http_response = http_response
            if self.enableLastJsonResponse:
                self.last_json_response = json_response
            if self.enableLastResponseHeaders:
                self.last_response_headers = headers
            if self.verbose:
                self.log("\nfetch Response:", self.id, method, url, http_status_code, "ResponseHeaders:", headers, "ResponseBody:", http_response)
            self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
            response.raise_for_status()
        except Timeout as e:
            details = ' '.join([self.id, method, url])
            raise RequestTimeout(details) from e
        except TooManyRedirects as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e
        except SSLError as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e
        except HTTPError as e:
            details = ' '.join([self.id, method, url])
            # let exchange-specific handlers raise a precise error first
            self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
            self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
            raise ExchangeError(details) from e
        except requestsConnectionError as e:
            error_string = str(e)
            details = ' '.join([self.id, method, url])
            if 'Read timed out' in error_string:
                raise RequestTimeout(details) from e
            else:
                raise NetworkError(details) from e
        except ConnectionResetError as e:
            error_string = str(e)
            details = ' '.join([self.id, method, url])
            raise NetworkError(details) from e
        except RequestException as e:  # base exception class
            error_string = str(e)
            details = ' '.join([self.id, method, url])
            if any(x in error_string for x in ['ECONNRESET', 'Connection aborted.', 'Connection broken:']):
                raise NetworkError(details) from e
            else:
                raise ExchangeError(details) from e
        # success path: prefer decoded JSON, then text, then raw bytes
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        if json_response is not None:
            return json_response
        elif self.is_text_response(headers):
            return http_response
        else:
            return response.content
def handle_http_status_code(self, http_status_code, http_status_text, url, method, body):
string_code = str(http_status_code)
if string_code in self.httpExceptions:
Exception = self.httpExceptions[string_code]
raise Exception(' '.join([self.id, method, url, string_code, http_status_text, body]))
    def parse_json(self, http_response):
        """Decode *http_response* as JSON when it looks like JSON; returns None otherwise."""
        try:
            if Exchange.is_json_encoded_object(http_response):
                return self.on_json_response(http_response)
        except ValueError:  # superclass of JsonDecodeError (python2)
            pass
def is_text_response(self, headers):
# https://github.com/ccxt/ccxt/issues/5302
content_type = headers.get('Content-Type', '')
return content_type.startswith('application/json') or content_type.startswith('text/')
@staticmethod
def key_exists(dictionary, key):
if dictionary is None or key is None:
return False
if isinstance(dictionary, list):
if isinstance(key, int) and 0 <= key and key < len(dictionary):
return dictionary[key] is not None
else:
return False
if key in dictionary:
return dictionary[key] is not None
return False
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if Exchange.key_exists(dictionary, key):
value = float(dictionary[key])
except ValueError as e:
value = default_value
return value
    @staticmethod
    def safe_string(dictionary, key, default_value=None):
        """Return str(dictionary[key]) if present and not None, else *default_value*."""
        return str(dictionary[key]) if Exchange.key_exists(dictionary, key) else default_value
    @staticmethod
    def safe_string_lower(dictionary, key, default_value=None):
        """Like safe_string but lowercases the result."""
        return str(dictionary[key]).lower() if Exchange.key_exists(dictionary, key) else default_value
    @staticmethod
    def safe_string_upper(dictionary, key, default_value=None):
        """Like safe_string but uppercases the result."""
        return str(dictionary[key]).upper() if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if not Exchange.key_exists(dictionary, key):
return default_value
value = dictionary[key]
try:
# needed to avoid breaking on "100.0"
# https://stackoverflow.com/questions/1094717/convert-a-string-to-integer-with-decimal-in-python#1094721
return int(float(value))
except ValueError:
return default_value
except TypeError:
return default_value
    @staticmethod
    def safe_integer_product(dictionary, key, factor, default_value=None):
        """Return int(dictionary[key] * factor) for numeric or numeric-string values, else default."""
        if not Exchange.key_exists(dictionary, key):
            return default_value
        value = dictionary[key]
        if isinstance(value, Number):
            return int(value * factor)
        elif isinstance(value, basestring):
            try:
                return int(float(value) * factor)
            except ValueError:
                pass
        return default_value
    @staticmethod
    def safe_timestamp(dictionary, key, default_value=None):
        """Read a seconds value at *key* and return it as milliseconds (x1000)."""
        return Exchange.safe_integer_product(dictionary, key, 1000, default_value)
    @staticmethod
    def safe_value(dictionary, key, default_value=None):
        """Return dictionary[key] unconverted if present and not None, else *default_value*."""
        return dictionary[key] if Exchange.key_exists(dictionary, key) else default_value
# we're not using safe_floats with a list argument as we're trying to save some cycles here
# we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
    @staticmethod
    def safe_float_2(dictionary, key1, key2, default_value=None):
        """safe_float over *key1*, falling back to *key2*, then *default_value*."""
        return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_2(dictionary, key1, key2, default_value=None):
        """safe_string over *key1*, falling back to *key2*, then *default_value*."""
        return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_lower_2(dictionary, key1, key2, default_value=None):
        """safe_string_lower over *key1*, falling back to *key2*, then *default_value*."""
        return Exchange.safe_either(Exchange.safe_string_lower, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_upper_2(dictionary, key1, key2, default_value=None):
        """safe_string_upper over *key1*, falling back to *key2*, then *default_value*."""
        return Exchange.safe_either(Exchange.safe_string_upper, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_integer_2(dictionary, key1, key2, default_value=None):
        """safe_integer over *key1*, falling back to *key2*, then *default_value*."""
        return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_integer_product_2(dictionary, key1, key2, factor, default_value=None):
        """safe_integer_product over *key1*, falling back to *key2*, then *default_value*."""
        value = Exchange.safe_integer_product(dictionary, key1, factor)
        return value if value is not None else Exchange.safe_integer_product(dictionary, key2, factor, default_value)
    @staticmethod
    def safe_timestamp_2(dictionary, key1, key2, default_value=None):
        """Seconds value at *key1* (fallback *key2*) converted to milliseconds."""
        return Exchange.safe_integer_product_2(dictionary, key1, key2, 1000, default_value)
    @staticmethod
    def safe_value_2(dictionary, key1, key2, default_value=None):
        """safe_value over *key1*, falling back to *key2*, then *default_value*."""
        return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
@staticmethod
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead"""
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
"""Deprecated, todo: remove references from subclasses"""
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
    @staticmethod
    def uuid22(length=22):
        """Random lowercase hex string of up to *length* characters (leading zeros trimmed)."""
        return format(random.getrandbits(length * 4), 'x')
    @staticmethod
    def uuid16(length=16):
        """Random lowercase hex string of up to *length* characters (leading zeros trimmed)."""
        return format(random.getrandbits(length * 4), 'x')
    @staticmethod
    def uuid():
        """Random UUID4 as a dashed string."""
        return str(uuid.uuid4())
    @staticmethod
    def uuidv1():
        """Time-based UUID1 with the dashes removed."""
        return str(uuid.uuid1()).replace('-', '')
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
    @staticmethod
    def strip(string):
        """str.strip wrapper (kept for transpiled code)."""
        return string.strip()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def merge(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
# -- diff --
for key in arg:
if result.get(key) is None:
result[key] = arg[key]
# -- enddiff --
return result
return {}
    @staticmethod
    def deep_extend(*args):
        """Recursively merge dicts left to right; a later non-dict value overwrites the accumulator."""
        result = None
        for arg in args:
            if isinstance(arg, dict):
                # restart the accumulator whenever it is not currently a dict
                if not isinstance(result, dict):
                    result = {}
                for key in arg:
                    result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
            else:
                result = arg
        return result
@staticmethod
def filter_by(array, key, value=None):
array = Exchange.to_array(array)
return list(filter(lambda x: x[key] == value, array))
    @staticmethod
    def filterBy(array, key, value=None):
        """camelCase alias of filter_by (kept for transpiled code)."""
        return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
    @staticmethod
    def groupBy(array, key):
        """camelCase alias of group_by (kept for transpiled code)."""
        return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
is_int_key = isinstance(key, int)
for element in array:
if ((is_int_key and (key < len(element))) or (key in element)) and (element[key] is not None):
k = element[key]
result[k] = element
return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def sort_by_2(array, key1, key2, descending=False):
return sorted(array, key=lambda k: (k[key1] if k[key1] is not None else "", k[key2] if k[key2] is not None else ""), reverse=descending)
    @staticmethod
    def array_concat(a, b):
        """Concatenate two lists (kept for transpiled code)."""
        return a + b
    @staticmethod
    def in_array(needle, haystack):
        """Membership test wrapper (kept for transpiled code)."""
        return needle in haystack
    @staticmethod
    def is_empty(object):
        """True for falsy values: empty containers/strings, 0, None."""
        return not object
    @staticmethod
    def extract_params(string):
        """Return the placeholder names found inside {curly-braces} in *string*, in order."""
        return re.findall(r'{([\w-]+)}', string)
    def implode_hostname(self, url):
        """Substitute the '{hostname}' placeholder in *url* with self.hostname."""
        return Exchange.implode_params(url, {'hostname': self.hostname})
@staticmethod
def implode_params(string, params):
if isinstance(params, dict):
for key in params:
if not isinstance(params[key], list):
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def urlencode(params={}, doseq=False):
for key, value in params.items():
if isinstance(value, bool):
params[key] = 'true' if value else 'false'
return _urlencode.urlencode(params, doseq)
    @staticmethod
    def urlencode_with_array_repeat(params={}):
        """urlencode with doseq, then strip '[n]' index suffixes so list keys repeat bare."""
        return re.sub(r'%5B\d*%5D', '', Exchange.urlencode(params, True))
    @staticmethod
    def rawencode(params={}):
        """urlencode then unquote: 'key=value' pairs without percent-escaping."""
        return _urlencode.unquote(Exchange.urlencode(params))
    @staticmethod
    def encode_uri_component(uri, safe="~()*!.'"):
        """Percent-encode *uri*, leaving the characters in *safe* untouched."""
        return _urlencode.quote(uri, safe=safe)
@staticmethod
def omit(d, *args):
if isinstance(d, dict):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
return d
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
    @staticmethod
    def sum(*args):
        """Sum only the numeric (int/float) arguments, ignoring everything else."""
        # the method name shadows the builtin at class scope only; inside this
        # body `sum` still resolves to the builtin because a class body is not
        # an enclosing scope for its methods
        return sum([arg for arg in args if isinstance(arg, (float, int))])
    @staticmethod
    def ordered(array):
        """Construct an OrderedDict from *array* (a mapping or iterable of pairs)."""
        return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume, *_] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
    @staticmethod
    def sec():
        """Alias of seconds()."""
        return Exchange.seconds()
    @staticmethod
    def msec():
        """Alias of milliseconds()."""
        return Exchange.milliseconds()
    @staticmethod
    def usec():
        """Alias of microseconds()."""
        return Exchange.microseconds()
    @staticmethod
    def seconds():
        """Current UNIX time in whole seconds."""
        return int(time.time())
    @staticmethod
    def milliseconds():
        """Current UNIX time in whole milliseconds."""
        return int(time.time() * 1000)
    @staticmethod
    def microseconds():
        """Current UNIX time in whole microseconds."""
        return int(time.time() * 1000000)
    @staticmethod
    def iso8601(timestamp=None):
        """Convert a millisecond int timestamp to an ISO 8601 string; None/invalid input yields None."""
        if timestamp is None:
            return timestamp
        # `long` is the py2-compat alias defined at module level
        if not isinstance(timestamp, (int, long)):
            return None
        if int(timestamp) < 0:
            return None
        try:
            utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
            # strftime yields microseconds; trim to seconds and append the millisecond remainder
            return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
        except (TypeError, OverflowError, OSError):
            return None
@staticmethod
def rfc2616(self, timestamp=None):
if timestamp is None:
ts = datetime.datetime.now()
else:
ts = timestamp
stamp = mktime(ts.timetuple())
return format_date_time(stamp)
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-', fullYear=True):
year_format = '%Y' if fullYear else '%y'
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime(year_format + infix + '%m' + infix + '%d')
    @staticmethod
    def yymmdd(timestamp, infix=''):
        """ymd with a two-digit year and no infix by default."""
        return Exchange.ymd(timestamp, infix, False)
    @staticmethod
    def yyyymmdd(timestamp, infix='-'):
        """ymd with a four-digit year."""
        return Exchange.ymd(timestamp, infix, True)
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
    @staticmethod
    def parse_date(timestamp=None):
        """Parse an RFC 2822 ('GMT' style) or ISO 8601 datetime string to milliseconds, else None."""
        if timestamp is None:
            return timestamp
        if not isinstance(timestamp, str):
            return None
        if 'GMT' in timestamp:
            try:
                # NOTE(review): parsedate() fields are ints without zero
                # padding; verify strptime('%Y%m%d%H%M%S') tolerates the
                # joined string for single-digit months/days
                string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
                dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
                return calendar.timegm(dt.utctimetuple()) * 1000
            except (TypeError, OverflowError, OSError):
                return None
        else:
            return Exchange.parse8601(timestamp)
    @staticmethod
    def parse8601(timestamp=None):
        """Parse an ISO 8601 datetime string into a millisecond UTC timestamp, or None on failure."""
        if timestamp is None:
            return timestamp
        yyyy = '([0-9]{4})-?'
        mm = '([0-9]{2})-?'
        dd = '([0-9]{2})(?:T|[\\s])?'
        h = '([0-9]{2}):?'
        m = '([0-9]{2}):?'
        s = '([0-9]{2})'
        ms = '(\\.[0-9]{1,3})?'
        tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
        regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
        try:
            match = re.search(regex, timestamp, re.IGNORECASE)
            if match is None:
                return None
            yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
            # pad the fractional part to exactly three digits
            ms = ms or '.000'
            ms = (ms + '00')[0:4]
            msint = int(ms[1:])
            sign = sign or ''
            # a '+HH:MM' zone means the wall time is ahead of UTC, so the offset is subtracted
            sign = int(sign + '1') * -1
            hours = int(hours or 0) * sign
            minutes = int(minutes or 0) * sign
            offset = datetime.timedelta(hours=hours, minutes=minutes)
            string = yyyy + mm + dd + h + m + s + ms + 'Z'
            dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
            dt = dt + offset
            return calendar.timegm(dt.utctimetuple()) * 1000 + msint
        except (TypeError, OverflowError, OSError, ValueError):
            return None
    @staticmethod
    def hash(request, algorithm='md5', digest='hex'):
        """Hash *request* bytes with *algorithm*; *digest* selects 'hex', 'base64' or raw bytes."""
        if algorithm == 'keccak':
            # keccak comes from a third-party module imported at the top of the file
            binary = bytes(keccak.SHA3(request))
        else:
            h = hashlib.new(algorithm, request)
            binary = h.digest()
        if digest == 'base64':
            return Exchange.binary_to_base64(binary)
        elif digest == 'hex':
            return Exchange.binary_to_base16(binary)
        return binary
    @staticmethod
    def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
        """HMAC of *request* with *secret* (both bytes); *digest* selects 'hex', 'base64' or raw bytes."""
        h = hmac.new(secret, request, algorithm)
        binary = h.digest()
        if digest == 'hex':
            return Exchange.binary_to_base16(binary)
        elif digest == 'base64':
            return Exchange.binary_to_base64(binary)
        return binary
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_concat_array(array):
result = bytes()
for element in array:
result = result + element
return result
    @staticmethod
    def base64urlencode(s):
        """URL-safe base64 of bytes *s* with '=' padding removed."""
        return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
    @staticmethod
    def binary_to_base64(s):
        """bytes -> standard-alphabet base64 str."""
        return Exchange.decode(base64.standard_b64encode(s))
    @staticmethod
    def base64_to_binary(s):
        """Standard-alphabet base64 -> bytes."""
        return base64.standard_b64decode(s)
    @staticmethod
    def string_to_base64(s):
        """Base64-encode a str (or bytes); the result is returned as bytes."""
        # will return string in the future
        binary = Exchange.encode(s) if isinstance(s, str) else s
        return Exchange.encode(Exchange.binary_to_base64(binary))
@staticmethod
def base64_to_string(s):
return base64.b64decode(s).decode('utf-8')
    @staticmethod
    def jwt(request, secret, alg='HS256'):
        """Build a JSON Web Token over the *request* claims, signed with *secret* (HS* or RS*)."""
        algos = {
            'HS256': hashlib.sha256,
            'HS384': hashlib.sha384,
            'HS512': hashlib.sha512,
        }
        header = Exchange.encode(Exchange.json({
            'alg': alg,
            'typ': 'JWT',
        }))
        encoded_header = Exchange.base64urlencode(header)
        encoded_data = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
        token = encoded_header + '.' + encoded_data
        # RS* algorithms sign with an RSA private key, HS* with an HMAC secret
        if alg[:2] == 'RS':
            signature = Exchange.rsa(token, secret, alg)
        else:
            algorithm = algos[alg]
            signature = Exchange.hmac(Exchange.encode(token), secret, algorithm, 'binary')
        return token + '.' + Exchange.base64urlencode(signature)
    @staticmethod
    def rsa(request, secret, alg='RS256'):
        """PKCS1v15-sign *request* with the PEM-encoded RSA private key *secret*."""
        algorithms = {
            "RS256": hashes.SHA256(),
            "RS384": hashes.SHA384(),
            "RS512": hashes.SHA512(),
        }
        algorithm = algorithms[alg]
        priv_key = load_pem_private_key(secret, None, backends.default_backend())
        return priv_key.sign(Exchange.encode(request), padding.PKCS1v15(), algorithm)
@staticmethod
def ecdsa(request, secret, algorithm='p256', hash=None, fixed_length=False):
# your welcome - frosty00
algorithms = {
'p192': [ecdsa.NIST192p, 'sha256'],
'p224': [ecdsa.NIST224p, 'sha256'],
'p256': [ecdsa.NIST256p, 'sha256'],
'p384': [ecdsa.NIST384p, 'sha384'],
'p521': [ecdsa.NIST521p, 'sha512'],
'secp256k1': [ecdsa.SECP256k1, 'sha256'],
}
if algorithm not in algorithms:
raise ArgumentsRequired(algorithm + ' is not a supported algorithm')
curve_info = algorithms[algorithm]
hash_function = getattr(hashlib, curve_info[1])
encoded_request = Exchange.encode(request)
if hash is not None:
digest = Exchange.hash(encoded_request, hash, 'binary')
else:
digest = base64.b16decode(encoded_request, casefold=True)
key = ecdsa.SigningKey.from_string(base64.b16decode(Exchange.encode(secret),
casefold=True), curve=curve_info[0])
r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
sigencode=ecdsa.util.sigencode_strings_canonize)
r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
counter = 0
minimum_size = (1 << (8 * 31)) - 1
half_order = key.privkey.order / 2
while fixed_length and (r_int > half_order or r_int <= minimum_size or s_int <= minimum_size):
r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
sigencode=ecdsa.util.sigencode_strings_canonize,
extra_entropy=Exchange.number_to_le(counter, 32))
r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
counter += 1
r, s = Exchange.decode(base64.b16encode(r_binary)).lower(), Exchange.decode(base64.b16encode(s_binary)).lower()
return {
'r': r,
's': s,
'v': v,
}
    @staticmethod
    def eddsa(request, secret, curve='ed25519'):
        """Ed25519-sign hex-encoded *request* with hex-encoded *secret*; returns base58."""
        # NOTE(review): the entropy parameter is fixed to zeros — confirm the
        # eddsa backend is meant to be used deterministically this way
        random = b'\x00' * 64
        request = base64.b16decode(request, casefold=True)
        secret = base64.b16decode(secret, casefold=True)
        signature = eddsa.calculateSignature(random, secret, request)
        return Exchange.binary_to_base58(signature)
    @staticmethod
    def json(data, params=None):
        """Compact JSON serialization (no whitespace); *params* is accepted but unused."""
        return json.dumps(data, separators=(',', ':'))
    @staticmethod
    def is_json_encoded_object(input):
        """Heuristic: a string of length >= 2 starting with '{' or '[' is treated as JSON."""
        return (isinstance(input, basestring) and
                (len(input) >= 2) and
                ((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode('latin-1')
@staticmethod
def decode(string):
return string.decode('latin-1')
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
    def nonce(self):
        """Default nonce: current UNIX time in seconds (exchanges may override)."""
        return Exchange.seconds()
@staticmethod
def check_required_version(required_version, error=True):
result = True
[major1, minor1, patch1] = required_version.split('.')
[major2, minor2, patch2] = __version__.split('.')
int_major1 = int(major1)
int_minor1 = int(minor1)
int_patch1 = int(patch1)
int_major2 = int(major2)
int_minor2 = int(minor2)
int_patch2 = int(patch2)
if int_major1 > int_major2:
result = False
if int_major1 == int_major2:
if int_minor1 > int_minor2:
result = False
elif int_minor1 == int_minor2 and int_patch1 > int_patch2:
result = False
if not result:
if error:
raise NotSupported('Your current version of CCXT is ' + __version__ + ', a newer version ' + required_version + ' is required, please, upgrade your version of CCXT')
else:
return error
return result
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
raise AuthenticationError('requires `' + key + '`')
else:
return error
return True
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
raise InvalidAddress('address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
raise InvalidAddress('address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': None,
'used': None,
'total': None,
}
    def common_currency_code(self, currency):
        """Map an exchange-specific currency code to its unified code, when substitution is enabled."""
        if not self.substituteCommonCurrencyCodes:
            return currency
        return self.safe_string(self.commonCurrencies, currency, currency)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
    def cost_to_precision(self, symbol, cost):
        """Truncate *cost* to the market's price precision."""
        market = self.market(symbol)
        return self.decimal_to_precision(cost, TRUNCATE, market['precision']['price'], self.precisionMode, self.paddingMode)
    def price_to_precision(self, symbol, price):
        """Round *price* to the market's price precision."""
        market = self.market(symbol)
        return self.decimal_to_precision(price, ROUND, market['precision']['price'], self.precisionMode, self.paddingMode)
    def amount_to_precision(self, symbol, amount):
        """Truncate *amount* to the market's amount precision."""
        market = self.market(symbol)
        return self.decimal_to_precision(amount, TRUNCATE, market['precision']['amount'], self.precisionMode, self.paddingMode)
    def fee_to_precision(self, symbol, fee):
        """Round *fee* to the market's price precision."""
        market = self.market(symbol)
        return self.decimal_to_precision(fee, ROUND, market['precision']['price'], self.precisionMode, self.paddingMode)
    def currency_to_precision(self, currency, fee):
        """Round *fee* to the given currency's precision."""
        return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode, self.paddingMode)
    def set_markets(self, markets, currencies=None):
        """Index *markets* (list or dict) into self.markets / markets_by_id / symbols / ids.

        Each market is merged over the default trading fees, precision and
        limits. self.currencies is taken from *currencies* when given,
        otherwise derived from the markets' base/quote fields.
        """
        values = list(markets.values()) if type(markets) is dict else markets
        for i in range(0, len(values)):
            values[i] = self.extend(
                self.fees['trading'],
                {'precision': self.precision, 'limits': self.limits},
                values[i]
            )
        self.markets = self.index_by(values, 'symbol')
        self.markets_by_id = self.index_by(values, 'id')
        self.symbols = sorted(self.markets.keys())
        self.ids = sorted(self.markets_by_id.keys())
        if currencies:
            self.currencies = self.deep_extend(self.currencies, currencies)
        else:
            # derive currency entries from the base/quote side of each market;
            # precision falls back through base/amount (or quote/price) to 8
            base_currencies = [{
                'id': market['baseId'] if (('baseId' in market) and (market['baseId'] is not None)) else market['base'],
                'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
                'code': market['base'],
                'precision': (
                    market['precision']['base'] if 'base' in market['precision'] else (
                        market['precision']['amount'] if 'amount' in market['precision'] else None
                    )
                ) if 'precision' in market else 8,
            } for market in values if 'base' in market]
            quote_currencies = [{
                'id': market['quoteId'] if (('quoteId' in market) and (market['quoteId'] is not None)) else market['quote'],
                'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
                'code': market['quote'],
                'precision': (
                    market['precision']['quote'] if 'quote' in market['precision'] else (
                        market['precision']['price'] if 'price' in market['precision'] else None
                    )
                ) if 'precision' in market else 8,
            } for market in values if 'quote' in market]
            base_currencies = self.sort_by(base_currencies, 'code')
            quote_currencies = self.sort_by(quote_currencies, 'code')
            self.base_currencies = self.index_by(base_currencies, 'code')
            self.quote_currencies = self.index_by(quote_currencies, 'code')
            currencies = self.sort_by(base_currencies + quote_currencies, 'code')
            self.currencies = self.deep_extend(self.currencies, self.index_by(currencies, 'code'))
        self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
        self.codes = sorted(self.currencies.keys())
        return self.markets
def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies'] is True:
currencies = self.fetch_currencies()
markets = self.fetch_markets(params)
return self.set_markets(markets, currencies)
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
    def load_fees(self, reload=False):
        """Load (and cache) combined trading/funding fees; reload=True forces a refetch."""
        if not reload:
            # NOTE(review): compares against the class-level Exchange.loaded_fees
            # default to detect "never loaded" — confirm that attribute exists
            # on the base class definition outside this view
            if self.loaded_fees != Exchange.loaded_fees:
                return self.loaded_fees
        self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
        return self.loaded_fees
    def fetch_markets(self, params={}):
        """Return statically-configured markets as a list; exchanges override this."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.to_array(self.markets)
    def fetch_currencies(self, params={}):
        """Return statically-configured currencies as a dict; exchanges override this."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
    def fetch_balance(self, params={}):
        """Fetch account balances; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_balance() not supported yet')
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; must be implemented by the concrete exchange."""
        raise NotSupported('create_order() not supported yet')
    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order by id; must be implemented by the concrete exchange."""
        raise NotSupported('cancel_order() not supported yet')
    def cancel_unified_order(self, order, params={}):
        """Cancel an order given its unified order structure (uses its 'id'/'symbol')."""
        return self.cancel_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
    def fetch_bids_asks(self, symbols=None, params={}) -> dict:
        """Fetch best bid/ask for multiple symbols; not supported by the base class."""
        raise NotSupported('API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now')
def fetch_ticker(self, symbol, params={}):
if self.has['fetchTickers']:
tickers = self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
else:
return ticker
else:
raise NotSupported(self.id + ' fetchTicker not supported yet')
    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for multiple symbols; not supported by the base class."""
        raise NotSupported('API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now')
    def fetch_order_status(self, id, symbol=None, params={}):
        """Fetch an order and return only its 'status' field."""
        order = self.fetch_order(id, symbol, params)
        return order['status']
    def fetch_order(self, id, symbol=None, params={}):
        """Fetch an order by id; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_order() is not supported yet')
    def fetch_unified_order(self, order, params={}):
        """Fetch an order given its unified order structure (uses its 'id'/'symbol')."""
        return self.fetch_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch order history; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_orders() is not supported yet')
    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_open_orders() is not supported yet')
    def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch closed orders; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_closed_orders() is not supported yet')
    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's trade history; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_my_trades() is not supported yet')
    def fetch_order_trades(self, id, symbol=None, params={}):
        """Fetch the trades belonging to an order; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_order_trades() is not supported yet')
    def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch deposits and withdrawals; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_transactions() is not supported yet')
    def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        """Fetch deposit history; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_deposits() is not supported yet')
    def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        """Fetch withdrawal history; must be implemented by the concrete exchange."""
        raise NotSupported('fetch_withdrawals() is not supported yet')
# def fetch_deposit_addresses(self, codes=None, params={}):
# raise NotSupported('fetch_deposit_addresses() is not supported yet')
def fetch_deposit_address(self, code, params={}):
if self.has['fetchDepositAddresses']:
deposit_addresses = self.fetch_deposit_addresses([code], params)
deposit_address = self.safe_value(deposit_addresses, code)
if deposit_address is None:
raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
else:
return deposit_address
else:
raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
    def parse_funding_rate(self, contract, market=None):
        """Parse one raw funding-rate entry; must be implemented by the concrete exchange."""
        raise NotSupported(self.id + ' parse_funding_rate() not supported yet')
def parse_funding_rates(self, response, market=None):
result = {}
for entry in response:
parsed = self.parse_funding_rate(entry, market)
result[parsed['symbol']] = parsed
return result
def parse_ohlcv(self, ohlcv, market=None):
if isinstance(ohlcv, list):
return [
self.safe_integer(ohlcv, 0),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 5),
]
else:
return ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
parsed = [self.parse_ohlcv(ohlcv, market) for ohlcv in ohlcvs]
sorted = self.sort_by(parsed, 0)
tail = since is None
return self.filter_by_since_limit(sorted, since, limit, 0, tail)
def parse_bid_ask(self, bidask, price_key=0, amount_key=0):
return [self.safe_number(bidask, price_key), self.safe_number(bidask, amount_key)]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
raise ExchangeError('unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, symbol, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'symbol': symbol,
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
    def safe_balance(self, balance):
        """Complete a balance structure in place.

        For every currency, derives whichever of free/used/total is missing
        from the other two (string math via Precise), converts all three to
        numbers, and builds the top-level 'free'/'used'/'total' index dicts.
        Mutates and returns the input dict.
        """
        # every key except the metadata/index keys is a currency code
        currencies = self.omit(balance, ['info', 'timestamp', 'datetime', 'free', 'used', 'total']).keys()
        balance['free'] = {}
        balance['used'] = {}
        balance['total'] = {}
        for currency in currencies:
            # derive the missing third value from the other two, if possible
            if balance[currency].get('total') is None:
                if balance[currency].get('free') is not None and balance[currency].get('used') is not None:
                    balance[currency]['total'] = Precise.string_add(balance[currency]['free'], balance[currency]['used'])
            if balance[currency].get('free') is None:
                if balance[currency].get('total') is not None and balance[currency].get('used') is not None:
                    balance[currency]['free'] = Precise.string_sub(balance[currency]['total'], balance[currency]['used'])
            if balance[currency].get('used') is None:
                if balance[currency].get('total') is not None and balance[currency].get('free') is not None:
                    balance[currency]['used'] = Precise.string_sub(balance[currency]['total'], balance[currency]['free'])
            # convert string amounts to the configured number type
            balance[currency]['free'] = self.parse_number(balance[currency]['free'])
            balance[currency]['used'] = self.parse_number(balance[currency]['used'])
            balance[currency]['total'] = self.parse_number(balance[currency]['total'])
            balance['free'][currency] = balance[currency]['free']
            balance['used'][currency] = balance[currency]['used']
            balance['total'][currency] = balance[currency]['total']
        return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
    def fetch_free_balance(self, params={}):
        """Fetch only the 'free' section of the account balance."""
        return self.fetch_partial_balance('free', params)
    def fetch_used_balance(self, params={}):
        """Fetch only the 'used' (on-hold) section of the account balance."""
        return self.fetch_partial_balance('used', params)
    def fetch_total_balance(self, params={}):
        """Fetch only the 'total' section of the account balance."""
        return self.fetch_partial_balance('total', params)
    def fetch_trading_fees(self, symbol, params={}):
        """Fetch trading fee rates; must be overridden by exchanges that support it."""
        raise NotSupported('fetch_trading_fees() not supported yet')
    def fetch_trading_fee(self, symbol, params={}):
        """Fetch the trading fee for one symbol by delegating to fetch_trading_fees.

        NOTE(review): the delegation passes `params` into the first positional
        slot of fetch_trading_fees(symbol, params={}) as declared above, which
        looks like a signature mismatch — confirm against derived-class
        overrides before changing.
        """
        if not self.has['fetchTradingFees']:
            raise NotSupported('fetch_trading_fee() not supported yet')
        return self.fetch_trading_fees(params)
    def fetch_funding_fees(self, params={}):
        """Fetch deposit/withdrawal funding fees; must be overridden by exchanges that support it."""
        raise NotSupported('fetch_funding_fees() not supported yet')
    def fetch_funding_fee(self, code, params={}):
        """Fetch the funding fee for one currency by delegating to fetch_funding_fees.

        NOTE(review): `code` is accepted but never forwarded — only `params`
        is passed on; confirm whether derived classes rely on this.
        """
        if not self.has['fetchFundingFees']:
            raise NotSupported('fetch_funding_fee() not supported yet')
        return self.fetch_funding_fees(params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
    def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Emulate OHLCV(+trade count) candles by fetching public trades and aggregating them."""
        if not self.has['fetchTrades']:
            raise NotSupported('fetch_ohlcv() not supported yet')
        self.load_markets()
        trades = self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcvc(trades, timeframe, since, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
    def fetch_status(self, params={}):
        """Return the cached exchange status, refreshing its 'updated' timestamp via fetch_time when available."""
        if self.has['fetchTime']:
            updated = self.fetch_time(params)
            self.status['updated'] = updated
        return self.status
    def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """CamelCase alias of fetch_ohlcv."""
        return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
result = self.convert_trading_view_to_ohlcv(ohlcvs)
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = []
for i in range(0, len(ohlcvs[t])):
result.append([
ohlcvs[t][i] if ms else (int(ohlcvs[t][i]) * 1000),
ohlcvs[o][i],
ohlcvs[h][i],
ohlcvs[l][i],
ohlcvs[c][i],
ohlcvs[v][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = {}
result[t] = []
result[o] = []
result[h] = []
result[l] = []
result[c] = []
result[v] = []
for i in range(0, len(ohlcvs)):
result[t].append(ohlcvs[i][0] if ms else int(ohlcvs[i][0] / 1000))
result[o].append(ohlcvs[i][1])
result[h].append(ohlcvs[i][2])
result[l].append(ohlcvs[i][3])
result[c].append(ohlcvs[i][4])
result[v].append(ohlcvs[i][5])
return result
    def build_ohlcvc(self, trades, timeframe='1m', since=None, limit=None):
        """Aggregate trades into OHLCVC candles: [timestamp, open, high, low, close, volume, count].

        NOTE(review): the loop runs range(0, num_trades - 1), so the last trade
        in the list is never folded into a candle — looks like an off-by-one;
        confirm intent before changing, since callers may depend on it.
        """
        ms = self.parse_timeframe(timeframe) * 1000  # candle bucket width in milliseconds
        ohlcvs = []
        # readable column indices for an OHLCVC row
        (timestamp, open, high, low, close, volume, count) = (0, 1, 2, 3, 4, 5, 6)
        num_trades = len(trades)
        oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
        for i in range(0, oldest):
            trade = trades[i]
            if (since is not None) and (trade['timestamp'] < since):
                continue
            opening_time = int(math.floor(trade['timestamp'] / ms) * ms)  # Shift the edge of the m/h/d (but not M)
            j = len(ohlcvs)
            candle = j - 1
            if (j == 0) or opening_time >= ohlcvs[candle][timestamp] + ms:
                # moved to a new timeframe -> create a new candle from opening trade
                ohlcvs.append([
                    opening_time,
                    trade['price'],
                    trade['price'],
                    trade['price'],
                    trade['price'],
                    trade['amount'],
                    1,  # count
                ])
            else:
                # still processing the same timeframe -> update opening trade
                ohlcvs[candle][high] = max(ohlcvs[candle][high], trade['price'])
                ohlcvs[candle][low] = min(ohlcvs[candle][low], trade['price'])
                ohlcvs[candle][close] = trade['price']
                ohlcvs[candle][volume] += trade['amount']
                ohlcvs[candle][count] += 1
        return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' == unit:
scale = 60 * 60 * 24 * 365
elif 'M' == unit:
scale = 60 * 60 * 24 * 30
elif 'w' == unit:
scale = 60 * 60 * 24 * 7
elif 'd' == unit:
scale = 60 * 60 * 24
elif 'h' == unit:
scale = 60 * 60
elif 'm' == unit:
scale = 60
elif 's' == unit:
scale = 1
else:
raise NotSupported('timeframe unit {} is not supported'.format(unit))
return amount * scale
@staticmethod
def round_timeframe(timeframe, timestamp, direction=ROUND_DOWN):
ms = Exchange.parse_timeframe(timeframe) * 1000
# Get offset based on timeframe in milliseconds
offset = timestamp % ms
return timestamp - offset + (ms if direction == ROUND_UP else 0)
    def safe_ticker(self, ticker, market=None, legacy=True):
        """Fill in derivable ticker fields (vwap, open/close/last, change, percentage, average).

        legacy=True computes with plain floats and mutates/returns the input
        dict; legacy=False computes with Precise string arithmetic and returns
        an extended copy with all numeric fields parsed.
        """
        if legacy:
            symbol = self.safe_value(ticker, 'symbol')
            if symbol is None:
                symbol = self.safe_symbol(None, market)
            timestamp = self.safe_integer(ticker, 'timestamp')
            baseVolume = self.safe_value(ticker, 'baseVolume')
            quoteVolume = self.safe_value(ticker, 'quoteVolume')
            vwap = self.safe_value(ticker, 'vwap')
            if vwap is None:
                vwap = self.vwap(baseVolume, quoteVolume)
            open = self.safe_value(ticker, 'open')
            close = self.safe_value(ticker, 'close')
            last = self.safe_value(ticker, 'last')
            change = self.safe_value(ticker, 'change')
            percentage = self.safe_value(ticker, 'percentage')
            average = self.safe_value(ticker, 'average')
            # close and last are synonyms; derive whichever is missing
            if (last is not None) and (close is None):
                close = last
            elif (last is None) and (close is not None):
                last = close
            if (last is not None) and (open is not None):
                if change is None:
                    change = last - open
                if average is None:
                    average = self.sum(last, open) / 2
            if (percentage is None) and (change is not None) and (open is not None) and (open > 0):
                percentage = change / open * 100
            if (change is None) and (percentage is not None) and (last is not None):
                change = percentage / 100 * last
            if (open is None) and (last is not None) and (change is not None):
                open = last - change
            ticker['symbol'] = symbol
            ticker['timestamp'] = timestamp
            ticker['datetime'] = self.iso8601(timestamp)
            ticker['open'] = open
            ticker['close'] = close
            ticker['last'] = last
            ticker['vwap'] = vwap
            ticker['change'] = change
            ticker['percentage'] = percentage
            ticker['average'] = average
            return ticker
        else:
            open = self.safe_value(ticker, 'open')
            close = self.safe_value(ticker, 'close')
            last = self.safe_value(ticker, 'last')
            change = self.safe_value(ticker, 'change')
            percentage = self.safe_value(ticker, 'percentage')
            average = self.safe_value(ticker, 'average')
            vwap = self.safe_value(ticker, 'vwap')
            baseVolume = self.safe_value(ticker, 'baseVolume')
            quoteVolume = self.safe_value(ticker, 'quoteVolume')
            if vwap is None:
                vwap = Precise.string_div(quoteVolume, baseVolume)
            # close and last are synonyms; derive whichever is missing
            if (last is not None) and (close is None):
                close = last
            elif (last is None) and (close is not None):
                last = close
            if (last is not None) and (open is not None):
                if change is None:
                    change = Precise.string_sub(last, open)
                if average is None:
                    average = Precise.string_div(Precise.string_add(last, open), '2')
            if (percentage is None) and (change is not None) and (open is not None) and (Precise.string_gt(open, '0')):
                percentage = Precise.string_mul(Precise.string_div(change, open), '100')
            if (change is None) and (percentage is not None) and (last is not None):
                change = Precise.string_div(Precise.string_mul(percentage, last), '100')
            if (open is None) and (last is not None) and (change is not None):
                open = Precise.string_sub(last, change)
            # timestamp and symbol operations don't belong in safeTicker
            # they should be done in the derived classes
            return self.extend(ticker, {
                'bid': self.safe_number(ticker, 'bid'),
                'bidVolume': self.safe_number(ticker, 'bidVolume'),
                'ask': self.safe_number(ticker, 'ask'),
                'askVolume': self.safe_number(ticker, 'askVolume'),
                'high': self.safe_number(ticker, 'high'),
                'low': self.safe_number(ticker, 'low'),
                'open': self.parse_number(open),
                'close': self.parse_number(close),
                'last': self.parse_number(last),
                'change': self.parse_number(change),
                'percentage': self.parse_number(percentage),
                'average': self.parse_number(average),
                'vwap': self.parse_number(vwap),
                'baseVolume': self.parse_number(baseVolume),
                'quoteVolume': self.parse_number(quoteVolume),
            })
def parse_tickers(self, tickers, symbols=None, params={}):
result = []
values = self.to_array(tickers)
for i in range(0, len(values)):
result.append(self.extend(self.parse_ticker(values[i]), params))
return self.filter_by_array(result, 'symbol', symbols)
def parse_deposit_addresses(self, addresses, codes=None, indexed=True, params={}):
result = []
for i in range(0, len(addresses)):
address = self.extend(self.parse_deposit_address(addresses[i]), params)
result.append(address)
if codes:
result = self.filter_by_array(result, 'currency', codes, False)
return self.index_by(result, 'currency') if indexed else result
def parse_trades(self, trades, market=None, since=None, limit=None, params={}):
array = self.to_array(trades)
array = [self.merge(self.parse_trade(trade, market), params) for trade in array]
array = self.sort_by_2(array, 'timestamp', 'id')
symbol = market['symbol'] if market else None
tail = since is None
return self.filter_by_symbol_since_limit(array, symbol, since, limit, tail)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
tail = since is None
return self.filter_by_currency_since_limit(array, code, since, limit, tail)
def parse_transfers(self, transfers, currency=None, since=None, limit=None, params={}):
array = self.to_array(transfers)
array = [self.extend(self.parse_transfer(transfer, currency), params) for transfer in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
tail = since is None
return self.filter_by_currency_since_limit(array, code, since, limit, tail)
def parse_ledger(self, data, currency=None, since=None, limit=None, params={}):
array = self.to_array(data)
result = []
for item in array:
entry = self.parse_ledger_entry(item, currency)
if isinstance(entry, list):
result += [self.extend(i, params) for i in entry]
else:
result.append(self.extend(entry, params))
result = self.sort_by(result, 'timestamp')
code = currency['code'] if currency else None
tail = since is None
return self.filter_by_currency_since_limit(result, code, since, limit, tail)
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
if isinstance(orders, list):
array = [self.extend(self.parse_order(order, market), params) for order in orders]
else:
array = [self.extend(self.parse_order(self.extend({'id': id}, order), market), params) for id, order in orders.items()]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
tail = since is None
return self.filter_by_symbol_since_limit(array, symbol, since, limit, tail)
def safe_market(self, marketId, market=None, delimiter=None):
if marketId is not None:
if self.markets_by_id is not None and marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
elif delimiter is not None:
parts = marketId.split(delimiter)
if len(parts) == 2:
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
return {
'id': marketId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
else:
return {
'id': marketId,
'symbol': marketId,
'base': None,
'quote': None,
'baseId': None,
'quoteId': None,
}
if market is not None:
return market
return {
'id': marketId,
'symbol': marketId,
'base': None,
'quote': None,
'baseId': None,
'quoteId': None,
}
    def safe_symbol(self, marketId, market=None, delimiter=None):
        """Return the unified symbol for a raw market id (resolution rules in safe_market)."""
        market = self.safe_market(marketId, market, delimiter)
        return market['symbol']
def safe_currency(self, currency_id, currency=None):
if currency_id is None and currency is not None:
return currency
if (self.currencies_by_id is not None) and (currency_id in self.currencies_by_id):
return self.currencies_by_id[currency_id]
return {
'id': currency_id,
'code': self.common_currency_code(currency_id.upper()) if currency_id is not None else currency_id
}
    def safe_currency_code(self, currency_id, currency=None):
        """Return the unified currency code for a raw currency id (resolution rules in safe_currency)."""
        currency = self.safe_currency(currency_id, currency)
        return currency['code']
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if value is not None:
array = [entry for entry in array if entry[field] == value]
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail else array[:limit]
return array
    def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None, tail=False):
        """Filter entries by symbol, since-timestamp and limit (see filter_by_value_since_limit)."""
        return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit, 'timestamp', tail)
    def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None, tail=False):
        """Filter entries by currency code, since-timestamp and limit (see filter_by_value_since_limit)."""
        return self.filter_by_value_since_limit(array, 'currency', code, since, limit, 'timestamp', tail)
def filter_by_since_limit(self, array, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail else array[:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
    def currency(self, code):
        """Return the unified currency structure for a code; raises if currencies are not loaded or the code is unknown."""
        if not self.currencies:
            raise ExchangeError('Currencies not loaded')
        # basestring: python2/3 compatibility shim defined at module level
        if isinstance(code, basestring) and (code in self.currencies):
            return self.currencies[code]
        raise ExchangeError('Does not have currency code ' + str(code))
    def market(self, symbol):
        """Return the unified market structure for a symbol (or raw market id); raises if markets are not loaded or the symbol is unknown."""
        if not self.markets:
            raise ExchangeError('Markets not loaded')
        # basestring: python2/3 compatibility shim defined at module level
        if isinstance(symbol, basestring):
            if symbol in self.markets:
                return self.markets[symbol]
            elif symbol in self.markets_by_id:
                return self.markets_by_id[symbol]
        raise BadSymbol('{} does not have market symbol {}'.format(self.id, symbol))
    def market_ids(self, symbols):
        """Map a list of unified symbols to raw exchange market ids."""
        return [self.market_id(symbol) for symbol in symbols]
    def market_id(self, symbol):
        """Return the raw exchange id for a unified symbol (falls back to the symbol itself)."""
        market = self.market(symbol)
        return market['id'] if type(market) is dict else symbol
    def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
        """Compute the fee for an order from the market's taker/maker rate.

        The market's 'feeSide' ('quote', 'base', 'get' or 'give') determines
        which currency the fee is charged in and whether cost is amount or
        amount * price. Returns {'type', 'currency', 'rate', 'cost'}.
        """
        market = self.markets[symbol]
        feeSide = self.safe_string(market, 'feeSide', 'quote')
        key = 'quote'
        cost = None
        if feeSide == 'quote':
            # the fee is always in quote currency
            cost = amount * price
        elif feeSide == 'base':
            # the fee is always in base currency
            cost = amount
        elif feeSide == 'get':
            # the fee is always in the currency you get
            cost = amount
            if side == 'sell':
                cost *= price
            else:
                key = 'base'
        elif feeSide == 'give':
            # the fee is always in the currency you give
            cost = amount
            if side == 'buy':
                cost *= price
            else:
                key = 'base'
        rate = market[takerOrMaker]
        if cost is not None:
            cost *= rate
        return {
            'type': takerOrMaker,
            'currency': market[key],
            'rate': rate,
            'cost': cost,
        }
    def edit_limit_buy_order(self, id, symbol, *args):
        """Edit an existing order into a limit buy order (delegates to edit_limit_order)."""
        return self.edit_limit_order(id, symbol, 'buy', *args)
    def edit_limit_sell_order(self, id, symbol, *args):
        """Edit an existing order into a limit sell order (delegates to edit_limit_order)."""
        return self.edit_limit_order(id, symbol, 'sell', *args)
    def edit_limit_order(self, id, symbol, *args):
        """Edit an existing order into a limit order (delegates to edit_order)."""
        return self.edit_order(id, symbol, 'limit', *args)
    def edit_order(self, id, symbol, *args):
        """Emulate order editing by canceling the order and creating a new one.

        Requires enableRateLimit since it issues two sequential requests.
        """
        if not self.enableRateLimit:
            raise ExchangeError('edit_order() requires enableRateLimit = true')
        self.cancel_order(id, symbol)
        return self.create_order(symbol, *args)
    def create_limit_order(self, symbol, side, amount, price, params={}) -> dict:
        """Place a limit order (convenience wrapper over create_order)."""
        return self.create_order(symbol, 'limit', side, amount, price, params)
    def create_market_order(self, symbol, side, amount, price=None, params={}) -> dict:
        """Place a market order (convenience wrapper over create_order)."""
        return self.create_order(symbol, 'market', side, amount, price, params)
    def create_limit_buy_order(self, symbol, amount, price, params={}) -> dict:
        """Place a limit buy order (convenience wrapper over create_order)."""
        return self.create_order(symbol, 'limit', 'buy', amount, price, params)
    def create_limit_sell_order(self, symbol, amount, price, params={}) -> dict:
        """Place a limit sell order (convenience wrapper over create_order)."""
        return self.create_order(symbol, 'limit', 'sell', amount, price, params)
    def create_market_buy_order(self, symbol, amount, params={}) -> dict:
        """Place a market buy order (convenience wrapper over create_order)."""
        return self.create_order(symbol, 'market', 'buy', amount, None, params)
    def create_market_sell_order(self, symbol, amount, params={}) -> dict:
        """Place a market sell order (convenience wrapper over create_order)."""
        return self.create_order(symbol, 'market', 'sell', amount, None, params)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed URL/headers/body for a request; pure virtual, every derived exchange must override it."""
        raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
def vwap(self, baseVolume, quoteVolume):
return (quoteVolume / baseVolume) if (quoteVolume is not None) and (baseVolume is not None) and (baseVolume > 0) else None
# -------------------------------------------------------------------------
    def check_required_dependencies(self):
        """Verify that optional third-party crypto dependencies are importable before they are used."""
        if self.requiresEddsa and eddsa is None:
            raise NotSupported('Eddsa functionality requires python-axolotl-curve25519, install with `pip install python-axolotl-curve25519==0.4.1.post2`: https://github.com/tgalal/python-axolotl-curve25519')
    def privateKeyToAddress(self, privateKey):
        """Derive an Ethereum-style address from a hex private key.

        secp256k1 public key -> keccak hash -> '0x' + last 40 hex chars, lowercased.
        """
        private_key_bytes = base64.b16decode(Exchange.encode(privateKey), True)
        public_key_bytes = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key.to_string()
        public_key_hash = keccak.SHA3(public_key_bytes)
        return '0x' + Exchange.decode(base64.b16encode(public_key_hash))[-40:].lower()
@staticmethod
def remove0x_prefix(value):
if value[:2] == '0x':
return value[2:]
return value
    def hashMessage(self, message):
        """Keccak-hash a hex message with the Ethereum personal-sign prefix; returns 0x-prefixed lowercase hex."""
        message_bytes = base64.b16decode(Exchange.encode(Exchange.remove0x_prefix(message)), True)
        hash_bytes = keccak.SHA3(b"\x19Ethereum Signed Message:\n" + Exchange.encode(str(len(message_bytes))) + message_bytes)
        return '0x' + Exchange.decode(base64.b16encode(hash_bytes)).lower()
    @staticmethod
    def signHash(hash, privateKey):
        """ECDSA-sign a hash on secp256k1; returns {'r', 's', 'v'} with 0x-prefixed r/s and v offset by 27."""
        signature = Exchange.ecdsa(hash[-64:], privateKey, 'secp256k1', None)
        return {
            'r': '0x' + signature['r'],
            's': '0x' + signature['s'],
            'v': 27 + signature['v'],
        }
    def sign_message_string(self, message, privateKey):
        """Sign a message and return the signature as a single concatenated hex string: r + s + v."""
        signature = self.signMessage(message, privateKey)
        return signature['r'] + Exchange.remove0x_prefix(signature['s']) + Exchange.binary_to_base16(Exchange.number_to_be(signature['v'], 1))
    def signMessage(self, message, privateKey):
        """Sign a message with the Ethereum personal-sign prefix (hashMessage + signHash).

        The comment below documents the two MetaMask prefix modes; this
        implementation corresponds to the prefixed ('ETH_SIGN') variant.
        """
        #
        # The following comment is related to MetaMask, we use the upper type of signature prefix:
        #
        # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
        #                         '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
        #                             prefixType: 'ETH_SIGN',
        #                             shouldAddPrefixBeforeCallingEthSign: true
        #                         }).then ((e, r) => console.log (e,r))
        #
        #     {                            ↓
        #         v: 28,
        #         r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
        #         s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
        #     }
        #
        # --------------------------------------------------------------------
        #
        # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
        #                         '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
        #                             prefixType: 'NONE',
        #                             shouldAddPrefixBeforeCallingEthSign: true
        #                         }).then ((e, r) => console.log (e,r))
        #
        #     {                            ↓
        #         v: 27,
        #         r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
        #         s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
        #     }
        #
        message_hash = self.hashMessage(message)
        signature = self.signHash(message_hash[-64:], privateKey[-64:])
        return signature
def get_network(self, network, code):
network = network.upper()
aliases = {
'ETHEREUM': 'ETH',
'ETHER': 'ETH',
'ERC20': 'ETH',
'ETH': 'ETH',
'TRC20': 'TRX',
'TRON': 'TRX',
'TRX': 'TRX',
'BEP20': 'BSC',
'BSC': 'BSC',
'HRC20': 'HT',
'HECO': 'HT',
'SPL': 'SOL',
'SOL': 'SOL',
'TERRA': 'LUNA',
'LUNA': 'LUNA',
'POLYGON': 'MATIC',
'MATIC': 'MATIC',
'EOS': 'EOS',
'WAVES': 'WAVES',
'AVALANCHE': 'AVAX',
'AVAX': 'AVAX',
'QTUM': 'QTUM',
'CHZ': 'CHZ',
'NEO': 'NEO',
'ONT': 'ONT',
'RON': 'RON',
}
if network == code:
return network
elif network in aliases:
return aliases[network]
else:
raise NotSupported(self.id + ' network ' + network + ' is not yet supported')
    def oath(self):
        """Return the current TOTP code derived from the configured .twofa secret."""
        if self.twofa is not None:
            return self.totp(self.twofa)
        else:
            raise ExchangeError(self.id + ' set .twofa to use this feature')
    @staticmethod
    def totp(key):
        """Compute a 6-digit time-based one-time password from a base32 secret.

        HMAC-SHA1 over the 30-second epoch counter with dynamic truncation
        (TOTP as in RFC 6238 / HOTP RFC 4226).
        """
        def hex_to_dec(n):
            return int(n, base=16)
        def base32_to_bytes(n):
            # re-pad the secret to a multiple of 8 chars as required by b32decode
            missing_padding = len(n) % 8
            padding = 8 - missing_padding if missing_padding > 0 else 0
            padded = n.upper() + ('=' * padding)
            return base64.b32decode(padded)  # throws an error if the key is invalid
        epoch = int(time.time()) // 30
        hmac_res = Exchange.hmac(epoch.to_bytes(8, 'big'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
        # dynamic truncation: low nibble of the last byte picks the offset (x2 because hex chars)
        offset = hex_to_dec(hmac_res[-1]) * 2
        otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
        return otp[-6:]
@staticmethod
def number_to_le(n, size):
return int(n).to_bytes(size, 'little')
@staticmethod
def number_to_be(n, size):
return int(n).to_bytes(size, 'big')
@staticmethod
def base16_to_binary(s):
return base64.b16decode(s, True)
@staticmethod
def binary_to_base16(s):
return Exchange.decode(base64.b16encode(s)).lower()
def sleep(self, milliseconds):
return time.sleep(milliseconds / 1000)
    @staticmethod
    def base58_to_binary(s):
        """Decode a base58 string into big-endian bytes."""
        # lazily build the shared encode/decode tables from the class-level alphabet
        if Exchange.base58_decoder is None:
            Exchange.base58_decoder = {}
            Exchange.base58_encoder = {}
            for i, c in enumerate(Exchange.base58_alphabet):
                Exchange.base58_decoder[c] = i
                Exchange.base58_encoder[i] = c
        result = 0
        # interpret the string as a base-58 integer
        for i in range(len(s)):
            result *= 58
            result += Exchange.base58_decoder[s[i]]
        return result.to_bytes((result.bit_length() + 7) // 8, 'big')
    @staticmethod
    def binary_to_base58(b):
        """Encode big-endian bytes as a base58 string."""
        # lazily build the shared encode/decode tables from the class-level alphabet
        if Exchange.base58_encoder is None:
            Exchange.base58_decoder = {}
            Exchange.base58_encoder = {}
            for i, c in enumerate(Exchange.base58_alphabet):
                Exchange.base58_decoder[c] = i
                Exchange.base58_encoder[i] = c
        result = 0
        # undo decimal_to_bytes
        for byte in b:
            result *= 0x100
            result += byte
        string = []
        # repeatedly divide by 58, collecting digits least-significant first
        while result > 0:
            result, next_character = divmod(result, 58)
            string.append(Exchange.base58_encoder[next_character])
        string.reverse()
        return ''.join(string)
    def reduce_fees_by_currency(self, fees, string=False):
        """Sum a list of fee structures per (currency, rate) pair.

        string=True sums with Precise string arithmetic; string=False sums
        with self.sum and parses costs to numbers. See the examples below.
        """
        #
        # self function takes a list of fee structures having the following format
        #
        #     string = True
        #
        #     [
        #         {'currency': 'BTC', 'cost': '0.1'},
        #         {'currency': 'BTC', 'cost': '0.2'  },
        #         {'currency': 'BTC', 'cost': '0.2', 'rate': '0.00123'},
        #         {'currency': 'BTC', 'cost': '0.4', 'rate': '0.00123'},
        #         {'currency': 'BTC', 'cost': '0.5', 'rate': '0.00456'},
        #         {'currency': 'USDT', 'cost': '12.3456'},
        #     ]
        #
        #     string  = False
        #
        #     [
        #         {'currency': 'BTC', 'cost': 0.1},
        #         {'currency': 'BTC', 'cost': 0.2},
        #         {'currency': 'BTC', 'cost': 0.2, 'rate': 0.00123},
        #         {'currency': 'BTC', 'cost': 0.4, 'rate': 0.00123},
        #         {'currency': 'BTC', 'cost': 0.5, 'rate': 0.00456},
        #         {'currency': 'USDT', 'cost': 12.3456},
        #     ]
        #
        # and returns a reduced fee list, where fees are summed per currency and rate(if any)
        #
        #     string = True
        #
        #     [
        #         {'currency': 'BTC', 'cost': '0.3'  },
        #         {'currency': 'BTC', 'cost': '0.6', 'rate': '0.00123'},
        #         {'currency': 'BTC', 'cost': '0.5', 'rate': '0.00456'},
        #         {'currency': 'USDT', 'cost': '12.3456'},
        #     ]
        #
        #     string  = False
        #
        #     [
        #         {'currency': 'BTC', 'cost': 0.3  },
        #         {'currency': 'BTC', 'cost': 0.6, 'rate': 0.00123},
        #         {'currency': 'BTC', 'cost': 0.5, 'rate': 0.00456},
        #         {'currency': 'USDT', 'cost': 12.3456},
        #     ]
        #
        reduced = {}
        for i in range(0, len(fees)):
            fee = fees[i]
            feeCurrencyCode = self.safe_string(fee, 'currency')
            if feeCurrencyCode is not None:
                rate = self.safe_string(fee, 'rate')
                cost = self.safe_value(fee, 'cost')
                if not (feeCurrencyCode in reduced):
                    reduced[feeCurrencyCode] = {}
                # fees with no rate share the '' bucket within a currency
                rateKey = '' if (rate is None) else rate
                if rateKey in reduced[feeCurrencyCode]:
                    if string:
                        reduced[feeCurrencyCode][rateKey]['cost'] = Precise.string_add(reduced[feeCurrencyCode][rateKey]['cost'], cost)
                    else:
                        reduced[feeCurrencyCode][rateKey]['cost'] = self.sum(reduced[feeCurrencyCode][rateKey]['cost'], cost)
                else:
                    reduced[feeCurrencyCode][rateKey] = {
                        'currency': feeCurrencyCode,
                        'cost': cost if string else self.parse_number(cost),
                    }
                    if rate is not None:
                        reduced[feeCurrencyCode][rateKey]['rate'] = rate if string else self.parse_number(rate)
        result = []
        feeValues = list(reduced.values())
        for i in range(0, len(feeValues)):
            reducedFeeValues = list(feeValues[i].values())
            result = self.array_concat(result, reducedFeeValues)
        return result
    def safe_trade(self, trade, market=None):
        """Complete a parsed trade structure in place.

        Derives cost from price * amount (scaled by contractSize and inverted
        for inverse contracts when the market defines them), normalizes the
        'fee'/'fees' fields (optionally reducing them per currency), and
        parses amount/price/cost to numbers. Mutates and returns the trade.
        """
        amount = self.safe_string(trade, 'amount')
        price = self.safe_string(trade, 'price')
        cost = self.safe_string(trade, 'cost')
        if cost is None:
            # contract trading
            contractSize = self.safe_string(market, 'contractSize')
            multiplyPrice = price
            if contractSize is not None:
                inverse = self.safe_value(market, 'inverse', False)
                if inverse:
                    multiplyPrice = Precise.string_div('1', price)
                multiplyPrice = Precise.string_mul(multiplyPrice, contractSize)
            cost = Precise.string_mul(multiplyPrice, amount)
        parseFee = self.safe_value(trade, 'fee') is None
        parseFees = self.safe_value(trade, 'fees') is None
        shouldParseFees = parseFee or parseFees
        fees = self.safe_value(trade, 'fees', [])
        if shouldParseFees:
            # collect whatever fee information is present into a single list
            tradeFees = self.safe_value(trade, 'fees')
            if tradeFees is not None:
                for j in range(0, len(tradeFees)):
                    tradeFee = tradeFees[j]
                    fees.append(self.extend({}, tradeFee))
            else:
                tradeFee = self.safe_value(trade, 'fee')
                if tradeFee is not None:
                    fees.append(self.extend({}, tradeFee))
        fee = self.safe_value(trade, 'fee')
        if shouldParseFees:
            reducedFees = self.reduce_fees_by_currency(fees, True) if self.reduceFees else fees
            reducedLength = len(reducedFees)
            for i in range(0, reducedLength):
                reducedFees[i]['cost'] = self.safe_number(reducedFees[i], 'cost')
                if 'rate' in reducedFees[i]:
                    reducedFees[i]['rate'] = self.safe_number(reducedFees[i], 'rate')
            if not parseFee and (reducedLength == 0):
                fee['cost'] = self.safe_number(fee, 'cost')
                if 'rate' in fee:
                    fee['rate'] = self.safe_number(fee, 'rate')
                reducedFees.append(fee)
            if parseFees:
                trade['fees'] = reducedFees
            if parseFee and (reducedLength == 1):
                trade['fee'] = reducedFees[0]
            tradeFee = self.safe_value(trade, 'fee')
            if tradeFee is not None:
                tradeFee['cost'] = self.safe_number(tradeFee, 'cost')
                if 'rate' in tradeFee:
                    tradeFee['rate'] = self.safe_number(tradeFee, 'rate')
                trade['fee'] = tradeFee
        trade['amount'] = self.parse_number(amount)
        trade['price'] = self.parse_number(price)
        trade['cost'] = self.parse_number(cost)
        return trade
    def safe_order(self, order, market=None):
        """Standardize a partially-parsed unified order structure.

        Derives missing 'filled', 'cost', 'fee'/'fees' and
        'lastTradeTimestamp' values from the order's raw trades, enforces
        amount = filled + remaining, recomputes 'average' and 'cost' where
        possible (honoring contract size for derivatives), and converts
        the string-math values back to numbers.

        :param dict order: unified order with string-typed numeric fields
        :param dict market: unified market the order belongs to (optional)
        :returns dict: *order* merged (via ``self.extend``) with the derived values
        """
        # parses numbers as strings
        # it is important pass the trades as unparsed rawTrades
        amount = self.omit_zero(self.safe_string(order, 'amount'))
        remaining = self.safe_string(order, 'remaining')
        filled = self.safe_string(order, 'filled')
        cost = self.safe_string(order, 'cost')
        average = self.omit_zero(self.safe_string(order, 'average'))
        price = self.omit_zero(self.safe_string(order, 'price'))
        lastTradeTimeTimestamp = self.safe_integer(order, 'lastTradeTimestamp')
        # flags: which fields have to be reconstructed from the trades
        parseFilled = (filled is None)
        parseCost = (cost is None)
        parseLastTradeTimeTimestamp = (lastTradeTimeTimestamp is None)
        fee = self.safe_value(order, 'fee')
        parseFee = (fee is None)
        parseFees = self.safe_value(order, 'fees') is None
        shouldParseFees = parseFee or parseFees
        fees = self.safe_value(order, 'fees', [])
        trades = []
        if parseFilled or parseCost or shouldParseFees:
            rawTrades = self.safe_value(order, 'trades', trades)
            oldNumber = self.number
            # we parse trades as strings here!
            self.number = str
            trades = self.parse_trades(rawTrades, market, None, None, {
                'symbol': order['symbol'],
                'side': order['side'],
                'type': order['type'],
                'order': order['id'],
            })
            self.number = oldNumber
            if isinstance(trades, list) and len(trades):
                # move properties that are defined in trades up into the order
                if order['symbol'] is None:
                    order['symbol'] = trades[0]['symbol']
                if order['side'] is None:
                    order['side'] = trades[0]['side']
                if order['type'] is None:
                    order['type'] = trades[0]['type']
                if order['id'] is None:
                    order['id'] = trades[0]['order']
                if parseFilled:
                    filled = '0'
                if parseCost:
                    cost = '0'
                # accumulate filled amount, cost, latest trade timestamp and fees
                for i in range(0, len(trades)):
                    trade = trades[i]
                    tradeAmount = self.safe_string(trade, 'amount')
                    if parseFilled and (tradeAmount is not None):
                        filled = Precise.string_add(filled, tradeAmount)
                    tradeCost = self.safe_string(trade, 'cost')
                    if parseCost and (tradeCost is not None):
                        cost = Precise.string_add(cost, tradeCost)
                    tradeTimestamp = self.safe_value(trade, 'timestamp')
                    if parseLastTradeTimeTimestamp and (tradeTimestamp is not None):
                        if lastTradeTimeTimestamp is None:
                            lastTradeTimeTimestamp = tradeTimestamp
                        else:
                            lastTradeTimeTimestamp = max(lastTradeTimeTimestamp, tradeTimestamp)
                    if shouldParseFees:
                        tradeFees = self.safe_value(trade, 'fees')
                        if tradeFees is not None:
                            for j in range(0, len(tradeFees)):
                                tradeFee = tradeFees[j]
                                fees.append(self.extend({}, tradeFee))
                        else:
                            tradeFee = self.safe_value(trade, 'fee')
                            if tradeFee is not None:
                                fees.append(self.extend({}, tradeFee))
        if shouldParseFees:
            # collapse the fee list via reduce_fees_by_currency when enabled
            reducedFees = self.reduce_fees_by_currency(fees, True) if self.reduceFees else fees
            reducedLength = len(reducedFees)
            for i in range(0, reducedLength):
                reducedFees[i]['cost'] = self.parse_number(reducedFees[i]['cost'])
                if 'rate' in reducedFees[i]:
                    reducedFees[i]['rate'] = self.parse_number(reducedFees[i]['rate'])
            if not parseFee and (reducedLength == 0):
                fee['cost'] = self.safe_number(fee, 'cost')
                if 'rate' in fee:
                    fee['rate'] = self.parse_number(fee['rate'])
                reducedFees.append(fee)
            if parseFees:
                order['fees'] = reducedFees
            if parseFee and (reducedLength == 1):
                order['fee'] = reducedFees[0]
        if amount is None:
            # ensure amount = filled + remaining
            if filled is not None and remaining is not None:
                amount = Precise.string_add(filled, remaining)
            elif self.safe_string(order, 'status') == 'closed':
                amount = filled
        if filled is None:
            if amount is not None and remaining is not None:
                filled = Precise.string_sub(amount, remaining)
        if remaining is None:
            if amount is not None and filled is not None:
                remaining = Precise.string_sub(amount, filled)
        # ensure that the average field is calculated correctly
        if average is None:
            if (filled is not None) and (cost is not None) and Precise.string_gt(filled, '0'):
                average = Precise.string_div(cost, filled)
        # also ensure the cost field is calculated correctly
        costPriceExists = (average is not None) or (price is not None)
        if parseCost and (filled is not None) and costPriceExists:
            multiplyPrice = None
            if average is None:
                multiplyPrice = price
            else:
                multiplyPrice = average
            # contract trading
            contractSize = self.safe_string(market, 'contractSize')
            if contractSize is not None:
                inverse = self.safe_value(market, 'inverse', False)
                if inverse:
                    multiplyPrice = Precise.string_div('1', multiplyPrice)
                multiplyPrice = Precise.string_mul(multiplyPrice, contractSize)
            cost = Precise.string_mul(multiplyPrice, filled)
        # support for market orders
        orderType = self.safe_value(order, 'type')
        emptyPrice = (price is None) or Precise.string_equals(price, '0')
        if emptyPrice and (orderType == 'market'):
            price = average
        # we have trades with string values at self point so we will mutate them
        for i in range(0, len(trades)):
            entry = trades[i]
            entry['amount'] = self.safe_number(entry, 'amount')
            entry['price'] = self.safe_number(entry, 'price')
            entry['cost'] = self.safe_number(entry, 'cost')
            fee = self.safe_value(entry, 'fee', {})
            fee['cost'] = self.safe_number(fee, 'cost')
            if 'rate' in fee:
                fee['rate'] = self.safe_number(fee, 'rate')
            entry['fee'] = fee
        return self.extend(order, {
            'lastTradeTimestamp': lastTradeTimeTimestamp,
            'price': self.parse_number(price),
            'amount': self.parse_number(amount),
            'cost': self.parse_number(cost),
            'average': self.parse_number(average),
            'filled': self.parse_number(filled),
            'remaining': self.parse_number(remaining),
            'trades': trades,
        })
def parse_number(self, value, default=None):
if value is None:
return default
else:
try:
return self.number(value)
except Exception:
return default
def safe_number(self, dictionary, key, default=None):
value = self.safe_string(dictionary, key)
return self.parse_number(value, default)
def safe_number_2(self, dictionary, key1, key2, default=None):
value = self.safe_string_2(dictionary, key1, key2)
return self.parse_number(value, default)
def parse_precision(self, precision):
if precision is None:
return None
return '1e' + Precise.string_neg(precision)
def omit_zero(self, string_number):
if string_number is None or string_number == '':
return None
if float(string_number) == 0:
return None
return string_number
def handle_withdraw_tag_and_params(self, tag, params):
if isinstance(tag, dict):
params = self.extend(tag, params)
tag = None
if tag is None:
tag = self.safe_string(params, 'tag')
if tag is not None:
params = self.omit(params, 'tag')
return [tag, params]
def get_supported_mapping(self, key, mapping={}):
# Takes a key and a dictionary, and returns the dictionary's value for that key
# :throws:
# NotSupported if the dictionary does not contain the key
if (key in mapping):
return mapping[key]
else:
raise NotSupported(self.id + ' ' + key + ' does not have a value in mapping')
def fetch_borrow_rate(self, code, params={}):
self.load_markets()
if not self.has['fetchBorrowRates']:
raise NotSupported(self.id + 'fetchBorrowRate() is not supported yet')
borrow_rates = self.fetch_borrow_rates(params)
rate = self.safe_value(borrow_rates, code)
if rate is None:
raise ExchangeError(self.id + 'fetchBorrowRate() could not find the borrow rate for currency code ' + code)
return rate
def handle_market_type_and_params(self, method_name, market=None, params={}):
default_type = self.safe_string_2(self.options, 'defaultType', 'type', 'spot')
method_options = self.safe_value(self.options, method_name)
method_type = default_type
if method_options is not None:
if isinstance(method_options, str):
method_type = method_options
else:
method_type = self.safe_string_2(method_options, 'defaultType', 'type')
market_type = method_type if market is None else market['type']
type = self.safe_string_2(params, 'defaultType', 'type', market_type)
params = self.omit(params, ['defaultType', 'type'])
return [type, params]
def load_time_difference(self, params={}):
server_time = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - server_time
return self.options['timeDifference']
| 40.97781 | 219 | 0.583375 |
021a2bc48846118e82c81c292f3c0915764ab4e8 | 2,526 | py | Python | CamCall.py | hammadahmad120/Camera-Calibration | 7f2ef8f789696420b9670afab65020d7b5d962a5 | [
"MIT"
] | null | null | null | CamCall.py | hammadahmad120/Camera-Calibration | 7f2ef8f789696420b9670afab65020d7b5d962a5 | [
"MIT"
] | null | null | null | CamCall.py | hammadahmad120/Camera-Calibration | 7f2ef8f789696420b9670afab65020d7b5d962a5 | [
"MIT"
import numpy as np
import cv2
import glob

# fix: dataset-metadata residue ("] | null | ... " prefix and trailing
# " | 36.608696 | ..." columns) was fused onto the first and last lines,
# making the script syntactically invalid; removed here.

# Chessboard pattern size: number of inner corners per row and column.
rows = 7
cols = 6

# Termination criteria for the corner sub-pixel refinement algorithm:
# stop after 30 iterations or when a corner moves by less than 0.001 px.
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 30, 0.001)

# Prepare the object points: (0,0,0), (1,0,0), (2,0,0), ..., (6,5,0).
# The board is planar (Z = 0), so they are the same for all images.
objectPoints = np.zeros((rows * cols, 3), np.float32)
objectPoints[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)

# Accumulators for the 3D object points and the matching 2D image points.
objectPointsArray = []
imgPointsArray = []

# Loop over the calibration image files
for path in glob.glob('D:/Images/*.jpg'):
    # Load the image and convert it to gray scale
    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (rows, cols), None)
    # Make sure the chess board pattern was found in the image
    if ret:
        # Refine the corner positions to sub-pixel accuracy
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # Add the 3D object points and the 2D image points to the arrays
        objectPointsArray.append(objectPoints)
        imgPointsArray.append(corners)
        # Draw the detected corners for visual inspection
        cv2.drawChessboardCorners(img, (rows, cols), corners, ret)
        cv2.imshow('chess board', img)
        cv2.waitKey(500)

# Calibrate the camera and save the results.
# NOTE(review): `gray` is whatever image the loop processed last; the script
# assumes at least one .jpg exists in D:/Images — confirm before running.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectPointsArray, imgPointsArray, gray.shape[::-1], None, None)
np.savez('D:/Images/calib.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)

# Print the mean re-projection error of the calibration
error = 0
for i in range(len(objectPointsArray)):
    imgPoints, _ = cv2.projectPoints(objectPointsArray[i], rvecs[i], tvecs[i], mtx, dist)
    error += cv2.norm(imgPointsArray[i], imgPoints, cv2.NORM_L2) / len(imgPoints)
print("Total error: ", error / len(objectPointsArray))

# Load one of the test images
img = cv2.imread('D:/Images/left08.jpg')
h, w = img.shape[:2]  # only height and width are needed

# Obtain the new camera matrix and undistort the image
newCameraMtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
undistortedImg = cv2.undistort(img, mtx, dist, None, newCameraMtx)

# Display the original next to the undistorted result
cv2.imshow('chess board', np.hstack((img, undistortedImg)))
cv2.waitKey(0)
cv2.destroyAllWindows()
713487de41ab94dfb63082f3a6ffbe1c8da745ba | 5,469 | py | Python | src/pymor/la/gram_schmidt.py | fameyer/pymorWin | b449a38754fddb719d554f1aacf9280a585f1168 | [
"Unlicense"
] | null | null | null | src/pymor/la/gram_schmidt.py | fameyer/pymorWin | b449a38754fddb719d554f1aacf9280a585f1168 | [
"Unlicense"
] | null | null | null | src/pymor/la/gram_schmidt.py | fameyer/pymorWin | b449a38754fddb719d554f1aacf9280a585f1168 | [
"Unlicense"
] | null | null | null | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
#
# Contributors: Andreas Buhr <andreas@andreasbuhr.de>
from __future__ import absolute_import, division, print_function
import numpy as np
from pymor.core.defaults import defaults
from pymor.core.exceptions import AccuracyError
from pymor.core.logger import getLogger
from pymor.tools.floatcmp import float_cmp_all
@defaults('atol', 'rtol', 'find_duplicates', 'reiterate', 'reiteration_threshold', 'check', 'check_tol')
def gram_schmidt(A, product=None, atol=1e-13, rtol=1e-13, offset=0, find_duplicates=True,
reiterate=True, reiteration_threshold=1e-1, check=True, check_tol=1e-3,
copy=False):
"""Orthonormalize a |VectorArray| using the Gram-Schmidt algorithm.
Parameters
----------
A
The |VectorArray| which is to be orthonormalized.
product
The scalar product w.r.t. which to orthonormalize, given as a linear
|Operator|. If `None` the Euclidean product is used.
atol
Vectors of norm smaller than `atol` are removed from the array.
rtol
Relative tolerance to determine a linear dependent vector.
offset
Assume that the first `offset` vectors are already orthogonal and start the
algorithm at the `offset + 1`-th vector.
find_duplicates
If `True`, eliminate duplicate vectors before the main loop.
reiterate
If `True`, orthonormalize again if the norm of the orthogonalized vector is
much smaller than the norm of the original vector.
reiteration_threshold
If `reiterate` is `True`, re-orthonormalize if the ratio between the norms of
the orthogonalized vector and the original vector is smaller than this value.
check
If `True`, check if the resulting VectorArray is really orthonormal.
check_tol
Tolerance for the check.
copy
If `True`, create a copy of `A` instead of working directly on `A`.
Returns
-------
The orthonormalized |VectorArray|.
"""
logger = getLogger('pymor.la.gram_schmidt.gram_schmidt')
if copy:
A = A.copy()
# find duplicate vectors since in some circumstances these cannot be detected in the main loop
# (is this really needed or is in this cases the tolerance poorly chosen anyhow)
if find_duplicates:
i = 0
while i < len(A):
duplicates = A.almost_equal(A, ind=i, o_ind=np.arange(max(offset, i + 1), len(A)))
if np.any(duplicates):
A.remove(np.where(duplicates)[0])
logger.info("Removing duplicate vectors")
i += 1
# main loop
remove = []
norm = None
for i in xrange(offset, len(A)):
# first calculate norm
if product is None:
initial_norm = A.l2_norm(ind=i)[0]
else:
initial_norm = np.sqrt(product.apply2(A, A, V_ind=i, U_ind=i, pairwise=True))[0]
if initial_norm < atol:
logger.info("Removing vector {} of norm {}".format(i, initial_norm))
remove.append(i)
continue
if i == 0:
A.scal(1/initial_norm, ind=0)
else:
first_iteration = True
# If reiterate is True, reiterate as long as the norm of the vector changes
# strongly during orthonormalization (due to Andreas Buhr).
while first_iteration or reiterate and norm < reiteration_threshold:
if first_iteration:
first_iteration = False
else:
logger.info('Orthonormalizing vector {} again'.format(i))
# orthogonalize to all vectors left
for j in xrange(i):
if j in remove:
continue
if product is None:
p = A.dot(A, ind=i, o_ind=j, pairwise=True)[0]
else:
p = product.apply2(A, A, V_ind=i, U_ind=j, pairwise=True)[0]
A.axpy(-p, A, ind=i, x_ind=j)
# calculate new norm
if product is None:
norm = A.l2_norm(ind=i)[0]
else:
norm = np.sqrt(product.apply2(A, A, V_ind=i, U_ind=i, pairwise=True))[0]
# remove vector if it got too small:
if norm / initial_norm < rtol:
logger.info("Removing linear dependent vector {}".format(i))
remove.append(i)
break
A.scal(1 / norm, ind=i)
if remove:
A.remove(remove)
if check:
if not product and not float_cmp_all(A.dot(A, pairwise=False), np.eye(len(A)),
atol=check_tol, rtol=0.):
err = np.max(np.abs(A.dot(A, pairwise=False) - np.eye(len(A))))
raise AccuracyError('result not orthogonal (max err={})'.format(err))
elif product and not float_cmp_all(product.apply2(A, A, pairwise=False), np.eye(len(A)),
atol=check_tol, rtol=0.):
err = np.max(np.abs(product.apply2(A, A, pairwise=False) - np.eye(len(A))))
raise AccuracyError('result not orthogonal (max err={})'.format(err))
return A
| 38.514085 | 104 | 0.592064 |
ad1b9aefae11d381f5b45762bca73c1c5ffbd0a9 | 10,651 | py | Python | stai/server/rate_limits.py | STATION-I/STAI-blockchain | a8ca05cbd2602eee7c2e4ce49c74c447a091ef0f | [
"Apache-2.0"
] | null | null | null | stai/server/rate_limits.py | STATION-I/STAI-blockchain | a8ca05cbd2602eee7c2e4ce49c74c447a091ef0f | [
"Apache-2.0"
] | null | null | null | stai/server/rate_limits.py | STATION-I/STAI-blockchain | a8ca05cbd2602eee7c2e4ce49c74c447a091ef0f | [
"Apache-2.0"
] | null | null | null | import dataclasses
import logging
import time
from collections import Counter
from typing import Optional
from stai.protocols.protocol_message_types import ProtocolMessageTypes
from stai.server.outbound_message import Message
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class RLSettings:
    """Rate-limit settings applied to a single protocol message type."""
    frequency: int  # maximum number of messages per reset period
    max_size: int  # maximum size of a single message, in bytes
    max_total_size: Optional[int] = None  # cumulative byte budget per period; None -> frequency * max_size
# Fallback limits for any message type missing from the tables below.
DEFAULT_SETTINGS = RLSettings(100, 1024 * 1024, 100 * 1024 * 1024)

# All non-transaction apis also have an aggregate limit
NON_TX_FREQ = 1000
NON_TX_MAX_TOTAL_SIZE = 100 * 1024 * 1024

# The three values in the tuple correspond to the three limits above
# The third is optional
# Transaction-related message types; these are exempt from the aggregate
# non-transaction budget enforced by RateLimiter.
rate_limits_tx = {
    ProtocolMessageTypes.new_transaction: RLSettings(5000, 100, 5000 * 100),
    ProtocolMessageTypes.request_transaction: RLSettings(5000, 100, 5000 * 100),
    ProtocolMessageTypes.respond_transaction: RLSettings(5000, 1 * 1024 * 1024, 20 * 1024 * 1024),  # TODO: check this
    ProtocolMessageTypes.send_transaction: RLSettings(5000, 1024 * 1024),
    ProtocolMessageTypes.transaction_ack: RLSettings(5000, 2048),
}
# All remaining (non-transaction) message types; these additionally count
# against the NON_TX_FREQ / NON_TX_MAX_TOTAL_SIZE aggregate budget.
rate_limits_other = {
    ProtocolMessageTypes.handshake: RLSettings(5, 10 * 1024, 5 * 10 * 1024),
    ProtocolMessageTypes.harvester_handshake: RLSettings(5, 1024 * 1024),
    ProtocolMessageTypes.new_signage_point_harvester: RLSettings(100, 1024),
    ProtocolMessageTypes.new_proof_of_space: RLSettings(100, 2048),
    ProtocolMessageTypes.request_signatures: RLSettings(100, 2048),
    ProtocolMessageTypes.respond_signatures: RLSettings(100, 2048),
    ProtocolMessageTypes.new_signage_point: RLSettings(200, 2048),
    ProtocolMessageTypes.declare_proof_of_space: RLSettings(100, 10 * 1024),
    ProtocolMessageTypes.request_signed_values: RLSettings(100, 512),
    ProtocolMessageTypes.farming_info: RLSettings(100, 1024),
    ProtocolMessageTypes.signed_values: RLSettings(100, 1024),
    ProtocolMessageTypes.new_peak_timelord: RLSettings(100, 20 * 1024),
    ProtocolMessageTypes.new_unfinished_block_timelord: RLSettings(100, 10 * 1024),
    ProtocolMessageTypes.new_signage_point_vdf: RLSettings(100, 100 * 1024),
    ProtocolMessageTypes.new_infusion_point_vdf: RLSettings(100, 100 * 1024),
    ProtocolMessageTypes.new_end_of_sub_slot_vdf: RLSettings(100, 100 * 1024),
    ProtocolMessageTypes.request_compact_proof_of_time: RLSettings(100, 10 * 1024),
    ProtocolMessageTypes.respond_compact_proof_of_time: RLSettings(100, 100 * 1024),
    ProtocolMessageTypes.new_peak: RLSettings(200, 512),
    ProtocolMessageTypes.request_proof_of_weight: RLSettings(5, 100),
    ProtocolMessageTypes.respond_proof_of_weight: RLSettings(5, 50 * 1024 * 1024, 100 * 1024 * 1024),
    ProtocolMessageTypes.request_block: RLSettings(200, 100),
    ProtocolMessageTypes.reject_block: RLSettings(200, 100),
    ProtocolMessageTypes.request_blocks: RLSettings(500, 100),
    ProtocolMessageTypes.respond_blocks: RLSettings(100, 50 * 1024 * 1024, 5 * 50 * 1024 * 1024),
    ProtocolMessageTypes.reject_blocks: RLSettings(100, 100),
    ProtocolMessageTypes.respond_block: RLSettings(200, 2 * 1024 * 1024, 10 * 2 * 1024 * 1024),
    ProtocolMessageTypes.new_unfinished_block: RLSettings(200, 100),
    ProtocolMessageTypes.request_unfinished_block: RLSettings(200, 100),
    ProtocolMessageTypes.respond_unfinished_block: RLSettings(200, 2 * 1024 * 1024, 10 * 2 * 1024 * 1024),
    ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot: RLSettings(200, 200),
    ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot: RLSettings(200, 200),
    ProtocolMessageTypes.respond_signage_point: RLSettings(200, 50 * 1024),
    ProtocolMessageTypes.respond_end_of_sub_slot: RLSettings(100, 50 * 1024),
    ProtocolMessageTypes.request_mempool_transactions: RLSettings(5, 1024 * 1024),
    ProtocolMessageTypes.request_compact_vdf: RLSettings(200, 1024),
    ProtocolMessageTypes.respond_compact_vdf: RLSettings(200, 100 * 1024),
    ProtocolMessageTypes.new_compact_vdf: RLSettings(100, 1024),
    ProtocolMessageTypes.request_peers: RLSettings(10, 100),
    ProtocolMessageTypes.respond_peers: RLSettings(10, 1 * 1024 * 1024),
    ProtocolMessageTypes.request_puzzle_solution: RLSettings(1000, 100),
    ProtocolMessageTypes.respond_puzzle_solution: RLSettings(1000, 1024 * 1024),
    ProtocolMessageTypes.reject_puzzle_solution: RLSettings(1000, 100),
    ProtocolMessageTypes.new_peak_wallet: RLSettings(200, 300),
    ProtocolMessageTypes.request_block_header: RLSettings(500, 100),
    ProtocolMessageTypes.respond_block_header: RLSettings(500, 500 * 1024),
    ProtocolMessageTypes.reject_header_request: RLSettings(500, 100),
    ProtocolMessageTypes.request_removals: RLSettings(500, 50 * 1024, 10 * 1024 * 1024),
    ProtocolMessageTypes.respond_removals: RLSettings(500, 1024 * 1024, 10 * 1024 * 1024),
    ProtocolMessageTypes.reject_removals_request: RLSettings(500, 100),
    ProtocolMessageTypes.request_additions: RLSettings(500, 1024 * 1024, 10 * 1024 * 1024),
    ProtocolMessageTypes.respond_additions: RLSettings(500, 1024 * 1024, 10 * 1024 * 1024),
    ProtocolMessageTypes.reject_additions_request: RLSettings(500, 100),
    ProtocolMessageTypes.request_header_blocks: RLSettings(500, 100),
    ProtocolMessageTypes.reject_header_blocks: RLSettings(100, 100),
    ProtocolMessageTypes.respond_header_blocks: RLSettings(500, 2 * 1024 * 1024, 100 * 1024 * 1024),
    ProtocolMessageTypes.request_peers_introducer: RLSettings(100, 100),
    ProtocolMessageTypes.respond_peers_introducer: RLSettings(100, 1024 * 1024),
    ProtocolMessageTypes.farm_new_block: RLSettings(200, 200),
    ProtocolMessageTypes.request_plots: RLSettings(10, 10 * 1024 * 1024),
    ProtocolMessageTypes.respond_plots: RLSettings(10, 100 * 1024 * 1024),
    ProtocolMessageTypes.coin_state_update: RLSettings(1000, 100 * 1024 * 1024),
    ProtocolMessageTypes.register_interest_in_puzzle_hash: RLSettings(1000, 100 * 1024 * 1024),
    ProtocolMessageTypes.respond_to_ph_update: RLSettings(1000, 100 * 1024 * 1024),
    ProtocolMessageTypes.register_interest_in_coin: RLSettings(1000, 100 * 1024 * 1024),
    ProtocolMessageTypes.respond_to_coin_update: RLSettings(1000, 100 * 1024 * 1024),
    ProtocolMessageTypes.request_ses_hashes: RLSettings(2000, 1 * 1024 * 1024),
    ProtocolMessageTypes.respond_ses_hashes: RLSettings(2000, 1 * 1024 * 1024),
    ProtocolMessageTypes.request_children: RLSettings(2000, 1024 * 1024),
    ProtocolMessageTypes.respond_children: RLSettings(2000, 1 * 1024 * 1024),
}
# TODO: only full node disconnects based on rate limits
class RateLimiter:
    """Fixed-window rate limiter for peer-to-peer protocol messages.

    Tracks per-message-type counts and cumulative sizes, plus an aggregate
    budget for all non-transaction message types, over windows of
    ``reset_seconds``. Counters reset whenever a new window starts.
    """
    incoming: bool  # True when counting received messages, False for outgoing
    reset_seconds: int  # length of the accounting window, in seconds
    current_minute: int  # window index: time // reset_seconds
    message_counts: Counter  # per message type, within the current window
    message_cumulative_sizes: Counter  # per message type, bytes within the window
    percentage_of_limit: int  # scale all limits down to this percentage
    non_tx_message_counts: int = 0  # aggregate count of non-transaction messages
    non_tx_cumulative_size: int = 0  # aggregate bytes of non-transaction messages

    def __init__(self, incoming: bool, reset_seconds=60, percentage_of_limit=100):
        """
        The incoming parameter affects whether counters are incremented
        unconditionally or not. For incoming messages, the counters are always
        incremented. For outgoing messages, the counters are only incremented
        if they are allowed to be sent by the rate limiter, since we won't send
        the messages otherwise.
        """
        self.incoming = incoming
        self.reset_seconds = reset_seconds
        # NOTE(review): this is a float (no int()), while process_msg_and_check
        # uses int(...); numeric equality keeps the first comparison correct.
        self.current_minute = time.time() // reset_seconds
        self.message_counts = Counter()
        self.message_cumulative_sizes = Counter()
        self.percentage_of_limit = percentage_of_limit
        self.non_tx_message_counts = 0
        self.non_tx_cumulative_size = 0

    def process_msg_and_check(self, message: Message) -> bool:
        """
        Returns True if message can be processed successfully, false if a rate limit is passed.
        """
        current_minute = int(time.time() // self.reset_seconds)
        if current_minute != self.current_minute:
            # a new window has started: reset all counters
            self.current_minute = current_minute
            self.message_counts = Counter()
            self.message_cumulative_sizes = Counter()
            self.non_tx_message_counts = 0
            self.non_tx_cumulative_size = 0
        try:
            message_type = ProtocolMessageTypes(message.type)
        except Exception as e:
            # unknown message types are let through (not rate limited here)
            log.warning(f"Invalid message: {message.type}, {e}")
            return True

        new_message_counts: int = self.message_counts[message_type] + 1
        new_cumulative_size: int = self.message_cumulative_sizes[message_type] + len(message.data)
        new_non_tx_count: int = self.non_tx_message_counts
        new_non_tx_size: int = self.non_tx_cumulative_size
        proportion_of_limit: float = self.percentage_of_limit / 100

        ret: bool = False
        try:
            limits = DEFAULT_SETTINGS
            if message_type in rate_limits_tx:
                limits = rate_limits_tx[message_type]
            elif message_type in rate_limits_other:
                limits = rate_limits_other[message_type]
                # non-transaction messages also count against the aggregate budget
                new_non_tx_count = self.non_tx_message_counts + 1
                new_non_tx_size = self.non_tx_cumulative_size + len(message.data)
                if new_non_tx_count > NON_TX_FREQ * proportion_of_limit:
                    return False
                if new_non_tx_size > NON_TX_MAX_TOTAL_SIZE * proportion_of_limit:
                    return False
            else:
                log.warning(f"Message type {message_type} not found in rate limits")

            if limits.max_total_size is None:
                limits = dataclasses.replace(limits, max_total_size=limits.frequency * limits.max_size)
            assert limits.max_total_size is not None

            if new_message_counts > limits.frequency * proportion_of_limit:
                return False
            if len(message.data) > limits.max_size:
                return False
            if new_cumulative_size > limits.max_total_size * proportion_of_limit:
                return False

            ret = True
            return True
        finally:
            if self.incoming or ret:
                # now that we determined that it's OK to send the message, commit the
                # updates to the counters. Alternatively, if this was an
                # incoming message, we already received it and it should
                # increment the counters unconditionally
                self.message_counts[message_type] = new_message_counts
                self.message_cumulative_sizes[message_type] = new_cumulative_size
                self.non_tx_message_counts = new_non_tx_count
                self.non_tx_cumulative_size = new_non_tx_size
| 51.703883 | 118 | 0.729791 |
39a01f88b7619e2486ec6290aaa3b2dbc1d00a1c | 16,050 | py | Python | venv/lib/python2.7/site-packages/setuptools/config.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | venv/lib/python2.7/site-packages/setuptools/config.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
] | 28 | 2020-03-04T22:01:48.000Z | 2022-03-12T00:59:47.000Z | venv/lib/python2.7/site-packages/setuptools/config.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z | from __future__ import absolute_import, unicode_literals
import io
import os
import sys
from collections import defaultdict
from functools import partial
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.py26compat import import_module
from six import string_types
def read_configuration(
        filepath, find_others=False, ignore_option_errors=False):
    """Parse a setup.cfg-style file and return its options as a dict.

    :param str|unicode filepath: path of the configuration file to read.
    :param bool find_others: also look for the other configuration files
        that distutils would normally consult.
    :param bool ignore_option_errors: silently skip options whose values
        cannot be resolved (e.g. failing ``file:``/``attr:`` directives)
        instead of propagating the exception.
    :rtype: dict
    """
    from setuptools.dist import Distribution, _Distribution

    filepath = os.path.abspath(filepath)
    if not os.path.isfile(filepath):
        raise DistutilsFileError(
            'Configuration file %s does not exist.' % filepath)

    previous_cwd = os.getcwd()
    # directives such as file: are resolved relative to the config file
    os.chdir(os.path.dirname(filepath))
    try:
        dist = Distribution()
        filenames = dist.find_config_files() if find_others else []
        if filepath not in filenames:
            filenames.append(filepath)
        _Distribution.parse_config_files(dist, filenames=filenames)
        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        os.chdir(previous_cwd)

    return configuration_to_dict(handlers)
def configuration_to_dict(handlers):
    """Collect the options set by *handlers* into a nested dict.

    :param list[ConfigHandler] handlers: handlers that have already run,
        usually obtained from :func:`parse_configuration`.
    :rtype: dict
    """
    config_dict = defaultdict(dict)

    for handler in handlers:
        for option in handler.set_options:
            target = handler.target_obj
            # prefer an explicit accessor (get_<option>) when the target
            # defines one, otherwise read the attribute directly
            getter = getattr(target, 'get_%s' % option, None)
            value = getter() if getter is not None else getattr(target, option)
            config_dict[handler.section_prefix][option] = value

    return config_dict
def parse_configuration(
        distribution, command_options, ignore_option_errors=False):
    """Apply additional parsing of configuration options to a distribution.

    Runs the metadata handler first, then the options handler, and returns
    the handlers that were used.

    :param Distribution distribution: distribution to update in place.
    :param dict command_options: options gathered from the config files.
    :param bool ignore_option_errors: silently skip options whose values
        cannot be resolved instead of raising.
    :rtype: list
    """
    handlers = (
        ConfigMetadataHandler(
            distribution.metadata, command_options, ignore_option_errors),
        ConfigOptionsHandler(
            distribution, command_options, ignore_option_errors),
    )
    for handler in handlers:
        handler.parse()
    return list(handlers)
class ConfigHandler(object):
"""Handles metadata supplied in configuration files."""
section_prefix = None
"""Prefix for config sections handled by this handler.
Must be provided by class heirs.
"""
aliases = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
    def __setitem__(self, option_name, value):
        """Set *value* for *option_name* on the target object.

        Aliases are resolved first; options already set on the target are
        left untouched; the value is run through the matching parser from
        ``self.parsers`` (failures optionally ignored); finally a
        ``set_<option>`` method is preferred over plain attribute
        assignment, and the option name is recorded in ``self.set_options``.

        :raises KeyError: if the target object has no such option.
        """
        unknown = tuple()  # sentinel meaning "attribute missing on target"
        target_obj = self.target_obj

        # Translate alias into real name.
        option_name = self.aliases.get(option_name, option_name)

        current_value = getattr(target_obj, option_name, unknown)
        if current_value is unknown:
            raise KeyError(option_name)

        if current_value:
            # Already inhabited. Skipping.
            return

        skip_option = False
        parser = self.parsers.get(option_name)
        if parser:
            try:
                value = parser(value)
            except Exception:
                # swallow the parser failure only when explicitly requested
                skip_option = True
                if not self.ignore_option_errors:
                    raise

        if skip_option:
            return

        setter = getattr(target_obj, 'set_%s' % option_name, None)
        if setter is None:
            setattr(target_obj, option_name, value)
        else:
            setter(value)

        self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
include: LICENSE
include: src/file.txt
:param str value:
:rtype: str
"""
if not isinstance(value, string_types):
return value
include_directive = 'file:'
if not value.startswith(include_directive):
return value
current_directory = os.getcwd()
filepath = value.replace(include_directive, '').strip()
filepath = os.path.abspath(filepath)
if not filepath.startswith(current_directory):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
if os.path.isfile(filepath):
with io.open(filepath, encoding='utf-8') as f:
value = f.read()
return value
@classmethod
def _parse_attr(cls, value):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
sys.path.insert(0, os.getcwd())
try:
module = import_module(module_name)
value = getattr(module, attr_name)
finally:
sys.path = sys.path[1:]
return value
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass # Keep silent for a new option may appear anytime.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are tranlsated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
class ConfigMetadataHandler(ConfigHandler):
    """Handles the ``[metadata]`` section of ``setup.cfg``."""

    section_prefix = 'metadata'

    # Map alternative field spellings used by other tools onto the real
    # distutils attribute names.
    aliases = {
        'home_page': 'url',
        'summary': 'description',
        'classifier': 'classifiers',
        'platform': 'platforms',
    }

    # We need to keep it loose, to be partially compatible with
    # `pbr` and `d2to1` packages which also uses `metadata` section.
    strict_mode = False

    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_file = self._parse_file

        return {
            'platforms': parse_list,
            'keywords': parse_list,
            'provides': parse_list,
            'requires': parse_list,
            'obsoletes': parse_list,
            'classifiers': self._get_parser_compound(parse_file, parse_list),
            'license': parse_file,
            'description': parse_file,
            'long_description': parse_file,
            'version': self._parse_version,
        }

    def _parse_version(self, value):
        """Parses `version` option value.

        :param value:
        :rtype: str
        """
        version = self._parse_attr(value)

        if callable(version):
            version = version()

        if isinstance(version, string_types):
            return version

        # Support version_info-style iterables; anything else falls back
        # to its string conversion.
        if hasattr(version, '__iter__'):
            return '.'.join(map(str, version))
        return '%s' % version
class ConfigOptionsHandler(ConfigHandler):
    # Handles the ``[options]`` section (and the ``options.*`` sub-sections)
    # of ``setup.cfg``.  NOTE: the ``parse_section_*`` method names below are
    # dynamic-dispatch targets resolved by ``ConfigHandler.parse`` (dots in
    # section names become double underscores), so they must not be renamed.
    section_prefix = 'options'
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_list_semicolon = partial(self._parse_list, separator=';')
        parse_bool = self._parse_bool
        parse_dict = self._parse_dict
        return {
            'zip_safe': parse_bool,
            'use_2to3': parse_bool,
            'include_package_data': parse_bool,
            'package_dir': parse_dict,
            'use_2to3_fixers': parse_list,
            'use_2to3_exclude_fixers': parse_list,
            'convert_2to3_doctests': parse_list,
            'scripts': parse_list,
            'eager_resources': parse_list,
            'dependency_links': parse_list,
            'namespace_packages': parse_list,
            'install_requires': parse_list_semicolon,
            'setup_requires': parse_list_semicolon,
            'tests_require': parse_list_semicolon,
            'packages': self._parse_packages,
            'entry_points': self._parse_file,
        }
    def _parse_packages(self, value):
        """Parses `packages` option value.

        Plain values are treated as a list of package names; the special
        ``find:`` directive delegates to ``setuptools.find_packages()``,
        configured via the ``[options.packages.find]`` section.

        :param value:
        :rtype: list
        """
        find_directive = 'find:'
        if not value.startswith(find_directive):
            return self._parse_list(value)
        # Read function arguments from a dedicated section.
        find_kwargs = self.parse_section_packages__find(
            self.sections.get('packages.find', {}))
        # Imported lazily to avoid import cycles at module load time.
        from setuptools import find_packages
        return find_packages(**find_kwargs)
    def parse_section_packages__find(self, section_options):
        """Parses `packages.find` configuration file section.

        To be used in conjunction with _parse_packages().

        :param dict section_options:
        """
        section_data = self._parse_section_to_dict(
            section_options, self._parse_list)
        # Only pass through options find_packages() understands, and drop
        # empty values so its defaults apply.
        valid_keys = ['where', 'include', 'exclude']
        find_kwargs = dict(
            [(k, v) for k, v in section_data.items() if k in valid_keys and v])
        where = find_kwargs.get('where')
        if where is not None:
            find_kwargs['where'] = where[0]  # cast list to single val
        return find_kwargs
    def parse_section_entry_points(self, section_options):
        """Parses `entry_points` configuration file section.

        :param dict section_options:
        """
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self['entry_points'] = parsed
    def _parse_package_data(self, section_options):
        # Shared helper for package_data/exclude_package_data: the ``*`` key
        # in setup.cfg maps to the '' (all packages) key distutils expects.
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        root = parsed.get('*')
        if root:
            parsed[''] = root
            del parsed['*']
        return parsed
    def parse_section_package_data(self, section_options):
        """Parses `package_data` configuration file section.

        :param dict section_options:
        """
        self['package_data'] = self._parse_package_data(section_options)
    def parse_section_exclude_package_data(self, section_options):
        """Parses `exclude_package_data` configuration file section.

        :param dict section_options:
        """
        self['exclude_package_data'] = self._parse_package_data(
            section_options)
    def parse_section_extras_require(self, section_options):
        """Parses `extras_require` configuration file section.

        :param dict section_options:
        """
        parse_list = partial(self._parse_list, separator=';')
        self['extras_require'] = self._parse_section_to_dict(
            section_options, parse_list)
| 29.288321 | 79 | 0.614019 |
8771deeec67c8eb24d1026815eb25d48a01abdf4 | 17,387 | py | Python | bin/generate_dmtn.py | lsst-dm/dmtn-158 | 1999ef9fbe5ee016a193131a3d096a4df447ee16 | [
"CC-BY-4.0"
] | null | null | null | bin/generate_dmtn.py | lsst-dm/dmtn-158 | 1999ef9fbe5ee016a193131a3d096a4df447ee16 | [
"CC-BY-4.0"
] | 1 | 2020-08-18T15:59:42.000Z | 2020-08-18T15:59:42.000Z | bin/generate_dmtn.py | lsst-dm/dmtn-158 | 1999ef9fbe5ee016a193131a3d096a4df447ee16 | [
"CC-BY-4.0"
] | null | null | null | import glob
import os.path
import subprocess
import textwrap
from abc import ABC, abstractmethod
from datetime import datetime
from io import StringIO
from contextlib import contextmanager
from milestones import (
add_rst_citations,
get_latest_pmcs_path,
get_local_data_path,
load_milestones,
write_output,
)
HEADING_CHARS = '#=-^"'
WBS_DEFINITIONS = {
"02C.00": "Data Management Level 2 Milestones",
"02C.01": "System Management",
"02C.02": "Systems Engineering",
"02C.03": "Alert Production",
"02C.04": "Data Release Production",
"02C.05": "Science User Interface and Tools",
"02C.06": "Science Data Archive and Application Services",
"02C.07": "LSST Data Facility",
"02C.08": "International Communications and Base Site",
"02C.09": "System Level Testing & Science Validation (Obsolete)",
"02C.10": "Science Quality and Reliability Engineering",
}
def underline(text, character, overline=False):
line = character * len(text) + "\n"
return f"{line if overline else ''}{text}\n{line}".strip()
def add_context(context_name, context_manager, *, needs_level=False):
def wrapper(cls):
@contextmanager
def new_method(self, *args, **kwargs):
if needs_level:
level = self._level + 1 if hasattr(self, "_level") else 1
manager = context_manager(level, *args, **kwargs)
else:
manager = context_manager(*args, **kwargs)
yield manager
self._buffer.write(manager.get_result())
setattr(cls, context_name, new_method)
return cls
return wrapper
class TextAccumulator(ABC):
def __init__(self):
self._buffer = StringIO()
@abstractmethod
def get_result(self):
return self._buffer.getvalue()
class Paragraph(TextAccumulator):
def write_line(self, line):
self._buffer.write(line + "\n")
def get_result(self):
return super().get_result() + "\n"
@add_context("paragraph", Paragraph)
class Directive(TextAccumulator):
def __init__(self, name, argument=None, options={}):
super().__init__()
self._buffer.write(f"{name}:: {argument if argument else ''}\n")
for name, value in options.items():
if value:
self._buffer.write(f":{name}: {value}\n")
else:
self._buffer.write(f":{name}:\n")
self._buffer.write("\n")
def get_result(self):
return ".." + textwrap.indent(self._buffer.getvalue(), " ")[2:] + "\n"
class Admonition(Directive):
pass
class Figure(Directive):
def __init__(self, filename, target=None):
opts = {"target": target} if target else {}
super().__init__("figure", filename, opts)
@add_context("paragraph", Paragraph)
class BulletListItem(TextAccumulator):
def get_result(self):
line_start = "-"
indented_result = textwrap.indent(
self._buffer.getvalue(), " " * (len(line_start) + 1)
)
return line_start + indented_result[len(line_start) :]
@add_context("bullet", BulletListItem)
class BulletList(TextAccumulator):
def get_result(self):
return super().get_result()
# Can't reference BulletList before it is defined
BulletListItem = add_context("bullet_list", BulletList)(BulletListItem)
@add_context("paragraph", Paragraph)
@add_context("paragraph", Paragraph)
@add_context("admonition", Admonition)
@add_context("figure", Figure)
@add_context("bullet_list", BulletList)
@add_context("directive", Directive)
class Section(TextAccumulator):
    """A reST section: optional anchor label, underlined title, content."""

    def __init__(self, level, title, anchor=None):
        super().__init__()
        self._level = level
        if anchor:
            # Emit a referenceable label just above the heading.
            self._buffer.write(f".. _{anchor}:\n\n")
        heading = underline(title, HEADING_CHARS[self._level])
        self._buffer.write(heading + "\n\n")

    def get_result(self):
        return super().get_result()
Section = add_context("section", Section, needs_level=True)(Section)
@add_context("paragraph", Paragraph)
@add_context("paragraph", Paragraph)
@add_context("section", Section, needs_level=True)
@add_context("admonition", Admonition)
@add_context("figure", Figure)
@add_context("bullet_list", BulletList)
@add_context("directive", Directive)
class ReSTDocument(TextAccumulator):
    """Top-level reST document: optional title, subtitle and field list."""

    def __init__(self, title=None, subtitle=None, options=None):
        super().__init__()
        write = self._buffer.write

        if title:
            write(underline(title, HEADING_CHARS[0], True) + "\n")
        if subtitle:
            write(underline(subtitle, HEADING_CHARS[1], True) + "\n")

        options = options or {}
        for name, value in options.items():
            # Field-list entry; the value part is optional (e.g. ``:orphan:``).
            if value:
                write(f":{name}: {value}\n")
            else:
                write(f":{name}:\n")

        if title or subtitle or options:
            write("\n")

    def get_result(self):
        return super().get_result()
pmcs_path = get_latest_pmcs_path()
git_dir = os.path.dirname(pmcs_path)
sha, date = (
subprocess.check_output(
["git", "log", "-1", "--pretty=format:'%H %ad'", "--date=unix"], cwd=git_dir
)
.decode("utf-8")
.strip("'")
.split()
)
p6_date = datetime.strptime(os.path.basename(pmcs_path), "%Y%m-ME.xls")
return sha, datetime.utcfromtimestamp(int(date)), p6_date
def get_extreme_dates(milestones):
earliest_ms, latest_ms = None, None
for ms in milestones:
if not earliest_ms or ms.due < earliest_ms:
earliest_ms = ms.due
if not latest_ms or ms.due > latest_ms:
latest_ms = ms.due
return earliest_ms, latest_ms
def generate_dmtn(milestones, wbs):
doc = ReSTDocument(options={"tocdepth": 1})
wbs_list = set(ms.wbs[:6] for ms in milestones if ms.wbs.startswith(wbs))
# Define replacements for all the milestone codes.
# This lets us change the way they are displayed according to their
# properties. In particular, we emphasize (= set in italics) all the
# completed milestones.
with doc.paragraph() as p:
for ms in milestones:
if ms.completed:
p.write_line(f".. |{ms.code}| replace:: *{ms.code}*")
else:
p.write_line(f".. |{ms.code}| replace:: {ms.code}")
with doc.section("Provenance") as my_section:
with my_section.paragraph() as p:
sha, timestamp, p6_date = get_version_info()
p.write_line(
f"This document was generated based on the contents of "
f"the `lsst-dm/milestones <https://github.com/lsst-dm/milestones>`_ "
f"repository, version "
f"`{sha[:8]} <https://github.com/lsst-dm/milestones/commit/{sha}>`_, "
f"dated {timestamp.strftime('%Y-%m-%d')}."
)
p.write_line(
f"This corresponds to the status recorded in the project "
f"controls system for {p6_date.strftime('%B %Y')}."
)
with doc.section("Notation") as my_section:
with my_section.paragraph() as p:
p.write_line(
"Throughout this document, the identifiers of completed "
"milestones are set in italics; those of milestones which are "
"still pending, in roman."
)
with doc.section("Summary") as my_section:
with my_section.paragraph() as p:
dm_milestones = [ms for ms in milestones if ms.wbs.startswith(wbs)]
levels = [ms.level for ms in dm_milestones]
p.write_line(
f"The DM Subsystem is currently tracking "
f"{len(dm_milestones)} milestones: "
f"{levels.count(1)} at Level 1, "
f"{levels.count(2)} at Level 2, "
f"{levels.count(3)} at Level 3, "
f"and {levels.count(4)} at Level 4."
)
if levels.count(None) != 0:
p.write_line(f"{levels.count(None)} have no level defined.")
p.write_line(
f"Of these, {len([ms for ms in dm_milestones if ms.completed])} "
f"have been completed."
)
p.write_line(
f"Of the incomplete milestones, "
f"{sum(1 for ms in dm_milestones if ms.due < datetime.now() and not ms.completed)} "
f"are late relative to the baseline schedule, while "
f"the remainder are scheduled for the future."
)
with my_section.figure("_static/burndown.png") as f:
with f.paragraph() as p:
p.write_line("Milestone completion as a function of date.")
with doc.section("Currently overdue milestones") as my_section:
overdue_milestones = [
ms
for ms in milestones
if ms.due < datetime.now() and ms.wbs.startswith(wbs) and not ms.completed
]
if overdue_milestones:
with my_section.bullet_list() as my_list:
for ms in sorted(overdue_milestones, key=lambda ms: ms.wbs + ms.code):
with my_list.bullet() as b:
with b.paragraph() as p:
p.write_line(
f"`{ms.code}`_: {ms.name} "
f"[Due {ms.due.strftime('%Y-%m-%d')}]"
)
else:
with my_section.paragraph() as p:
p.write_line("None.")
with doc.section("Milestones by due date") as my_section:
earliest_ms, latest_ms = get_extreme_dates(
ms for ms in milestones if ms.wbs.startswith(wbs)
)
first_month = datetime(earliest_ms.year, earliest_ms.month, 1)
last_month = (
datetime(latest_ms.year, latest_ms.month + 1, 1)
if latest_ms.month < 12
else datetime(latest_ms.year + 1, 1, 1)
)
for year in range(latest_ms.year, earliest_ms.year - 1, -1):
for month in range(12, 0, -1):
start_date = datetime(year, month, 1)
end_date = (
datetime(year, month + 1, 1)
if month < 12
else datetime(year + 1, 1, 1)
)
if end_date <= first_month or start_date >= last_month:
continue
with my_section.section(f"Due in {start_date.strftime('%B %Y')}") as s:
output = [
ms
for ms in milestones
if ms.due >= start_date
and ms.due < end_date
and ms.wbs.startswith(wbs)
]
with s.bullet_list() as my_list:
if output:
for ms in output:
with my_list.bullet() as b:
with b.paragraph() as p:
p.write_line(f"|{ms.code}|_: {ms.name}")
else:
with my_list.bullet() as b:
with b.paragraph() as p:
p.write_line("No milestones due.")
with doc.section("Milestones by WBS") as my_section:
for sub_wbs in sorted(wbs_list):
with my_section.section(
f"{sub_wbs}: {WBS_DEFINITIONS[sub_wbs]}"
) as section:
with section.figure(
f"_static/graph_{sub_wbs}.png",
target=f"_static/graph_{sub_wbs}.png",
) as f:
with f.paragraph() as p:
p.write_line(
f"Relationships between milestones in WBS {sub_wbs} and "
f"their immediate predecessors and successors. "
f"Ellipses correspond to milestones within this WBS "
f"element; rectangles to those in other elements. "
f"Blue milestones have been completed; orange "
f"milestones are overdue."
)
for ms in sorted(milestones, key=lambda ms: ms.due):
if not ms.wbs.startswith(sub_wbs):
continue
with section.section(
f"|{ms.code}|: {ms.name}", ms.code
) as subsection:
with subsection.bullet_list() as my_list:
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
p.write_line(f"**WBS:** {ms.wbs}")
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
level = ms.level if ms.level else "Undefined"
p.write_line(f"**Level:** {level}")
if ms.test_spec or ms.jira_testplan:
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
p.write_line(f"**Test specification:**")
if ms.test_spec:
p.write_line(
add_rst_citations(f"{ms.test_spec}")
)
else:
p.write_line("Undefined")
if ms.jira_testplan:
p.write_line(f":jirab:`{ms.jira_testplan}`")
preds, succs = [], []
for candidate in milestones:
if candidate.code in ms.predecessors:
if candidate.wbs.startswith(wbs):
preds.append(f"|{candidate.code}|_")
else:
preds.append(f"|{candidate.code}|")
if candidate.code in ms.successors:
if candidate.wbs.startswith(wbs):
succs.append(f"|{candidate.code}|_")
else:
succs.append(f"|{candidate.code}|")
if preds:
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
p.write_line(
f"**Predecessors**: {', '.join(preds)}"
)
if succs:
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
p.write_line(
f"**Successors**: {', '.join(succs)}"
)
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
p.write_line(
f"**Due:** {ms.due.strftime('%Y-%m-%d')}"
)
with my_list.bullet() as my_bullet:
with my_bullet.paragraph() as p:
if ms.completed:
p.write_line(
f"**Completed:** {ms.completed.strftime('%Y-%m-%d')}"
)
else:
p.write_line(f"**Completion pending**")
if ms.jira:
p.write_line(f":jirab:`{ms.jira}`")
if ms.description:
with subsection.paragraph() as p:
for line in ms.description.strip().split(". "):
p.write_line(
add_rst_citations(line.strip(" .") + ".")
)
else:
with subsection.admonition(
"warning", "No description available"
):
pass
with doc.section("Bibliography") as bib:
with bib.directive(
"bibliography",
" ".join(glob.glob("lsstbib/*.bib")),
{"style": "lsst_aa"},
):
pass
return doc.get_result()
if __name__ == "__main__":
    # Load the milestone set from the latest PMCS extract plus local
    # annotations, then render the technote body for the DM WBS ("02C").
    milestones = load_milestones(get_latest_pmcs_path(), get_local_data_path())
    write_output("index.rst", generate_dmtn(milestones, "02C"), comment_prefix="..")
| 39.87844 | 100 | 0.497613 |
52e6aaa0e3a08c13b1310d6fba701f49e484c490 | 3,113 | py | Python | engram/engram.py | rgrannell1/engram.py | 69ca1af7b0ddb963a611e15414aa2eda48d6c325 | [
"MIT",
"Unlicense"
] | null | null | null | engram/engram.py | rgrannell1/engram.py | 69ca1af7b0ddb963a611e15414aa2eda48d6c325 | [
"MIT",
"Unlicense"
] | 36 | 2015-01-24T23:12:10.000Z | 2015-07-12T19:01:44.000Z | engram/engram.py | rgrannell1/engram.py | 69ca1af7b0ddb963a611e15414aa2eda48d6c325 | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import os
import time
import sys
import sql
import queue
import routes
import signal
import threading
from request_url import request_url
from db import Database, WriteJob, ReadJob
from result import Ok, Err, Result
from flask import Flask, redirect, url_for, request
from shareddict import SharedDict
import threading
import logging
logging.basicConfig(level = logging.INFO)
logger = logging.getLogger(__name__)
def create_tables(database_in):
    """Queue the DDL jobs that create the engram schema (idempotent)."""
    ddl_statements = ["""
    CREATE TABLE IF NOT EXISTS archives (
      archive_id integer PRIMARY KEY AUTOINCREMENT,
      content blob NOT NULL,
      mimetype text NOT NULL,
      ctime integer NOT NULL
    );
    """, """
    CREATE TABLE IF NOT EXISTS bookmarks (
      bookmark_id integer PRIMARY KEY AUTOINCREMENT,
      url text NOT NULL,
      title text NOT NULL,
      ctime integer NOT NULL
    );
    """, """
    CREATE TABLE IF NOT EXISTS bookmark_archives (
      bookmark_archive_id integer PRIMARY KEY AUTOINCREMENT,
      bookmark_id REFERENCES bookmarks(bookmark_id),
      archive_id REFERENCES archives(archive_id)
    );
    """]
    for statement in ddl_statements:
        database_in.put(WriteJob(statement, ()))
def create_server(fpath, database_in, database_out, test = None):
    """Create the Flask app, register all routes, and run it (blocking).

    :param fpath: path to the database file (unused here; kept for
        interface compatibility with ``create``).
    :param database_in: queue of read/write jobs consumed by the DB thread.
    :param database_out: shared dict the DB thread writes results into.
    :param test: when truthy, enables Flask's TESTING mode.
    :return: the Flask app (after ``app.run`` returns).
    """
    app = Flask(__name__)

    if test:
        app.config['TESTING'] = True

    # Ensure the schema exists before any route can touch the database.
    # (Both this call and the chain below were previously bound to unused
    # variables; the dead stores have been removed.)
    create_tables(database_in)

    # Route registration happens entirely through .tap( ) side-effects;
    # the Ok value itself is irrelevant.
    (
        Ok(None)
        .tap(lambda _: routes.index (app))
        .tap(lambda _: routes.bookmarks (app))
        .tap(lambda _: routes.public (app))
        .tap(lambda _: routes.delete (app, database_in, database_out))
        .tap(lambda _: routes.export (app, database_in, database_out))
        .tap(lambda _: routes.restore (app, database_in, database_out))
        .tap(lambda _: routes.archives (app, database_in, database_out))
        .tap(lambda _: routes.favicon (app, database_in, database_out))
        .tap(lambda _: routes.resave (app, database_in, database_out))
        .tap(lambda _: routes.default (app, database_in, database_out))
        .tap(lambda _: routes.bookmarks_api_route(app, database_in, database_out))
    )

    app.run(threaded = True)
    return app
def create(fpath, database_in, database_out, test = None):
    """Install a SIGTERM handler, then start the engram server.

    Bug fixed: the ``test`` flag is now forwarded to ``create_server``
    instead of being hard-coded to ``None``.
    """
    def sigterm_handler(signal, stack_frame):
        """ cleanly shut down when the SIGTERM signal is sent. """
        logger.info('shutting down.')
        # NOTE(review): this reads Flask's ``request``, which is only bound
        # inside a request context -- confirm the handler actually works
        # when SIGTERM arrives outside a request.
        request.environ.get('werkzeug.server.shutdown')( )
        sys.exit(0)

    signal.signal(signal.SIGTERM, sigterm_handler)
    create_server(fpath, database_in, database_out, test=test)
if __name__ == "__main__":
    database_in = queue.Queue( )
    database_out = SharedDict( )
    # A single consumer thread owns the database connection; every read and
    # write is funnelled through the database_in queue.
    def consume_database_jobs( ):
        database = Database('data/engram', database_in, database_out)
        while True:
            database.perform( )
    # NOTE(review): the thread is non-daemon, so it keeps the process alive
    # after the Flask server exits -- confirm that is intended.
    database_thread = threading.Thread(target = consume_database_jobs)
    database_thread.start( )
    create('data/engram', database_in, database_out)
| 19.45625 | 76 | 0.66656 |
d2b483f81f8bcbb48ab01f315b44f5de4eb22981 | 9,348 | py | Python | core/src/main/python/wlsdeploy/aliases/alias_constants.py | avijeetgorai/weblogic-deploy-tooling | 9f7f8a7a101a6f5c8381db84cba262d90b6bff25 | [
"Apache-2.0",
"MIT"
] | null | null | null | core/src/main/python/wlsdeploy/aliases/alias_constants.py | avijeetgorai/weblogic-deploy-tooling | 9f7f8a7a101a6f5c8381db84cba262d90b6bff25 | [
"Apache-2.0",
"MIT"
] | null | null | null | core/src/main/python/wlsdeploy/aliases/alias_constants.py | avijeetgorai/weblogic-deploy-tooling | 9f7f8a7a101a6f5c8381db84cba262d90b6bff25 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-02-13T04:08:39.000Z | 2020-02-13T04:08:39.000Z | """
Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl.
These constants are internal to the aliases module and should not be used, as they are not part of the public API.
"""
from wlsdeploy.util.enum import Enum
# Keys used in the alias definition JSON files.
ACCESS = 'access'
ATTRIBUTES = 'attributes'
CHILD_FOLDERS_TYPE = 'child_folders_type'
CONTAINS = 'contains'
DEFAULT = 'default'
DEFAULT_NAME_VALUE = 'default_name_value'
FLATTENED_FOLDER_DATA = 'flattened_folder_data'
FOLDERS = 'folders'
FOLDER_PARAMS = 'folder_params'
GET_MBEAN_TYPE = 'get_mbean_type'
GET_METHOD = 'get_method'
MERGE = 'merge'
MODEL_NAME = 'model_name'
NAME_VALUE = 'name_value'
PASSWORD_TOKEN = "--FIX ME--"
PREFERRED_MODEL_TYPE = 'preferred_model_type'
RESTART_REQUIRED = 'restart_required'
SET_MBEAN_TYPE = 'set_mbean_type'
SET_METHOD = 'set_method'
UNRESOLVED_ATTRIBUTES_MAP = '__unresolved_attributes__'
UNRESOLVED_FOLDERS_MAP = '__unresolved_folders__'
USES_PATH_TOKENS = 'uses_path_tokens'
VALUE = 'value'
# VERSION is used for folder versioning
VERSION = 'version'
# VERSION_RANGE is used for attribute versioning
VERSION_RANGE = 'version'
WLST_ATTRIBUTES_PATH = 'wlst_attributes_path'
WLST_CREATE_PATH = 'wlst_create_path'
WLST_LIST_PATH = 'wlst_list_path'
WLST_MODE = 'wlst_mode'
WLST_NAME = 'wlst_name'
WLST_NAMES_MAP = '__wlst_names__'
WLST_PATH = 'wlst_path'
WLST_PATHS = 'wlst_paths'
WLST_READ_TYPE = 'wlst_read_type'
WLST_SKIP_NAMES = '__wlst_skip_names__'
WLST_SUBFOLDERS_PATH = 'wlst_subfolders_path'
WLST_TYPE = 'wlst_type'
# CHILD_FOLDER_TYPES
MULTIPLE = 'multiple'
MULTIPLE_WITH_TYPE_SUBFOLDER = 'multiple_with_type_subfolder'
NONE_CHILD_FOLDERS_TYPE = 'none'
SINGLE = 'single'
SINGLE_UNPREDICTABLE = 'single_unpredictable'
ChildFoldersTypes = Enum(['MULTIPLE', 'MULTIPLE_WITH_TYPE_SUBFOLDER', 'NONE', 'SINGLE', 'SINGLE_UNPREDICTABLE'])
# get_method values
GET = 'GET'
LSA = 'LSA'
NONE = 'NONE'
# set_method values
MBEAN = 'MBEAN'
# attribute wlst_type values
BOOLEAN = 'boolean'
COMMA_DELIMITED_STRING = 'delimited_string[comma]'
CREDENTIAL = 'credential'
DELIMITED_STRING = 'delimited_string'
DICTIONARY = 'dict'
DOUBLE = 'double'
INTEGER = 'integer'
JARRAY = 'jarray'
JAVA_LANG_BOOLEAN = 'java.lang.Boolean'
LIST = 'list'
LONG = 'long'
OBJECT = 'object'
PASSWORD = 'password'
PATH_SEPARATOR_DELIMITED_STRING = 'delimited_string[path_separator]'
PROPERTIES = 'properties'
SEMI_COLON_DELIMITED_STRING = 'delimited_string[semicolon]'
SPACE_DELIMITED_STRING = 'delimited_string[space]'
STRING = 'string'
MASKED = '<masked>'
# Groupings of the wlst_type values above, used for type dispatch.
ALIAS_DELIMITED_TYPES = [
    COMMA_DELIMITED_STRING,
    DELIMITED_STRING,
    PATH_SEPARATOR_DELIMITED_STRING,
    SEMI_COLON_DELIMITED_STRING,
    SPACE_DELIMITED_STRING
]
ALIAS_LIST_TYPES = [
    COMMA_DELIMITED_STRING,
    DELIMITED_STRING,
    JARRAY,
    LIST,
    PATH_SEPARATOR_DELIMITED_STRING,
    SEMI_COLON_DELIMITED_STRING,
    SPACE_DELIMITED_STRING
]
ALIAS_MAP_TYPES = [PROPERTIES, DICTIONARY]
ALIAS_BOOLEAN_TYPES = [
    BOOLEAN,
    JAVA_LANG_BOOLEAN
]
ALIAS_PRIMITIVE_DATA_TYPES = [
    BOOLEAN,
    CREDENTIAL,
    DOUBLE,
    INTEGER,
    JAVA_LANG_BOOLEAN,
    LONG,
    PASSWORD,
    STRING
]
ALIAS_NUMERIC_DATA_TYPES = [
    DOUBLE,
    INTEGER,
    LONG
]
# The union of primitive, list and map types.
ALIAS_DATA_TYPES = list()
ALIAS_DATA_TYPES.extend(ALIAS_PRIMITIVE_DATA_TYPES)
ALIAS_DATA_TYPES.extend(ALIAS_LIST_TYPES)
ALIAS_DATA_TYPES.extend(ALIAS_MAP_TYPES)
def __build_security_provider_data_structures(name_map, base_path):
    """
    Populate the security provider data structures for the given provider type.
    :param name_map: the provider name map
    :param base_path: the provider base path
    """
    # ``items()`` works on both Python 2 (Jython/WLST) and Python 3, unlike
    # the previous ``iteritems()`` which is Python-2-only.
    for key, value in name_map.items():
        SECURITY_PROVIDER_FOLDER_PATHS.append(base_path + '/' + key)
        SECURITY_PROVIDER_NAME_MAP[key] = value
        # MBean interface names are the implementation class name + 'MBean'.
        mbean_name = value + 'MBean'
        SECURITY_PROVIDER_MBEAN_NAME_MAP[mbean_name] = key
    return
# Maps of model folder name to WebLogic provider implementation class, one
# map per security provider category.
ADJUDICATION_PROVIDER_NAME_MAP = {
    'DefaultAdjudicator': 'weblogic.security.providers.authorization.DefaultAdjudicator'
}
AUDIT_PROVIDER_NAME_MAP = {
    'DefaultAuditor': 'weblogic.security.providers.audit.DefaultAuditor'
}
AUTHENTICATION_PROVIDER_NAME_MAP = {
    'SAML2IdentityAsserter': 'com.bea.security.saml2.providers.SAML2IdentityAsserter',
    'ActiveDirectoryAuthenticator': 'weblogic.security.providers.authentication.ActiveDirectoryAuthenticator',
    'CustomDBMSAuthenticator': 'weblogic.security.providers.authentication.CustomDBMSAuthenticator',
    'DefaultAuthenticator': 'weblogic.security.providers.authentication.DefaultAuthenticator',
    'DefaultIdentityAsserter': 'weblogic.security.providers.authentication.DefaultIdentityAsserter',
    'IPlanetAuthenticator': 'weblogic.security.providers.authentication.IPlanetAuthenticator',
    'LDAPAuthenticator': 'weblogic.security.providers.authentication.LDAPAuthenticator',
    'LDAPX509IdentityAsserter': 'weblogic.security.providers.authentication.LDAPX509IdentityAsserter',
    'NegotiateIdentityAsserter': 'weblogic.security.providers.authentication.NegotiateIdentityAsserter',
    'NovellAuthenticator': 'weblogic.security.providers.authentication.NovellAuthenticator',
    'OpenLDAPAuthenticator': 'weblogic.security.providers.authentication.OpenLDAPAuthenticator',
    'OracleInternetDirectoryAuthenticator':
        'weblogic.security.providers.authentication.OracleInternetDirectoryAuthenticator',
    'OracleUnifiedDirectoryAuthenticator':
        'weblogic.security.providers.authentication.OracleUnifiedDirectoryAuthenticator',
    'OracleVirtualDirectoryAuthenticator':
        'weblogic.security.providers.authentication.OracleVirtualDirectoryAuthenticator',
    'ReadOnlySQLAuthenticator': 'weblogic.security.providers.authentication.ReadOnlySQLAuthenticator',
    'SQLAuthenticator': 'weblogic.security.providers.authentication.SQLAuthenticator',
    'VirtualUserAuthenticator': 'weblogic.security.providers.authentication.VirtualUserAuthenticator',
    'SAMLAuthenticator': 'weblogic.security.providers.saml.SAMLAuthenticator',
    'SAMLIdentityAsserterV2': 'weblogic.security.providers.saml.SAMLIdentityAsserterV2',
    'TrustServiceIdentityAsserter': 'oracle.security.jps.wls.providers.trust.TrustServiceIdentityAsserter',
}
AUTHORIZATION_PROVIDER_NAME_MAP = {
    'DefaultAuthorizer': 'weblogic.security.providers.authorization.DefaultAuthorizer',
    'XACMLAuthorizer': 'weblogic.security.providers.xacml.authorization.XACMLAuthorizer'
}
CERT_PATH_PROVIDER_NAME_MAP = {
    'CertificateRegistry': 'weblogic.security.providers.pk.CertificateRegistry',
    'WebLogicCertPathProvider': 'weblogic.security.providers.pk.WebLogicCertPathProvider'
}
CREDENTIAL_MAPPING_PROVIDER_NAME_MAP = {
    'SAML2CredentialMapper': 'com.bea.security.saml2.providers.SAML2CredentialMapper',
    'DefaultCredentialMapper': 'weblogic.security.providers.credentials.DefaultCredentialMapper',
    'PKICredentialMapper': 'weblogic.security.providers.credentials.PKICredentialMapper',
    'SAMLCredentialMapperV2': 'weblogic.security.providers.saml.SAMLCredentialMapperV2'
}
PASSWORD_VALIDATION_PROVIDER_NAME_MAP = {
    'SystemPasswordValidator': 'com.bea.security.providers.authentication.passwordvalidator.SystemPasswordValidator'
}
ROLE_MAPPING_PROVIDER_NAME_MAP = {
    'DefaultRoleMapper': 'weblogic.security.providers.authorization.DefaultRoleMapper',
    'XACMLRoleMapper': 'weblogic.security.providers.xacml.authorization.XACMLRoleMapper'
}
# Model folder paths under which each provider category lives.
REALM_FOLDER_PATH = '/SecurityConfiguration/Realm'
ADJUDICATION_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/Adjudicator'
AUDIT_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/Auditor'
AUTHENTICATION_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/AuthenticationProvider'
AUTHORIZATION_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/Authorizer'
CERT_PATH_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/CertPathProvider'
CREDENTIAL_MAPPING_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/CredentialMapper'
PASSWORD_VALIDATION_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/PasswordValidator'
ROLE_MAPPING_PROVIDER_PARENT_FOLDER_PATH = REALM_FOLDER_PATH + '/RoleMapper'
# Aggregate structures populated from every provider map at import time.
SECURITY_PROVIDER_FOLDER_PATHS = list()
SECURITY_PROVIDER_NAME_MAP = dict()
SECURITY_PROVIDER_MBEAN_NAME_MAP = dict()
__build_security_provider_data_structures(ADJUDICATION_PROVIDER_NAME_MAP, ADJUDICATION_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(AUDIT_PROVIDER_NAME_MAP, AUDIT_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(AUTHENTICATION_PROVIDER_NAME_MAP, AUTHENTICATION_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(AUTHORIZATION_PROVIDER_NAME_MAP, AUTHORIZATION_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(CERT_PATH_PROVIDER_NAME_MAP, CERT_PATH_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(CREDENTIAL_MAPPING_PROVIDER_NAME_MAP,
                                          CREDENTIAL_MAPPING_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(PASSWORD_VALIDATION_PROVIDER_NAME_MAP,
                                          PASSWORD_VALIDATION_PROVIDER_PARENT_FOLDER_PATH)
__build_security_provider_data_structures(ROLE_MAPPING_PROVIDER_NAME_MAP, ROLE_MAPPING_PROVIDER_PARENT_FOLDER_PATH)
| 40.467532 | 119 | 0.811617 |
788be50b3b958fa572d3e8196d9aad28a00ef003 | 1,789 | py | Python | examples/metrics.py | autodesk-cloud/ochopod | a934aa68ed3d1b242856006052c12e95258cd8c3 | [
"Apache-2.0"
] | 139 | 2015-03-12T17:26:07.000Z | 2020-10-04T15:25:48.000Z | examples/metrics.py | autodesk-cloud/ochopod | a934aa68ed3d1b242856006052c12e95258cd8c3 | [
"Apache-2.0"
] | 28 | 2015-04-01T06:12:09.000Z | 2016-06-21T09:36:32.000Z | examples/metrics.py | autodesk-cloud/ochopod | a934aa68ed3d1b242856006052c12e95258cd8c3 | [
"Apache-2.0"
] | 31 | 2015-03-24T20:19:09.000Z | 2021-06-13T11:00:20.000Z | #
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script illustrates how you can report simple user metrics. Just start a local standalone Zookeeper server and run
"python metrics.py".
"""
from ochopod.bindings.generic.marathon import Pod
from ochopod.models.piped import Actor as Piped
from time import time
if __name__ == '__main__':

    class Strategy(Piped):
        """Pod lifecycle strategy that reports a simple 'uptime' user metric.

        The dict returned from sanity_check() is what ochopod exposes as the
        pod's user metrics (visible via the /info endpoint, per the module
        docstring above).
        """

        # Ask ochopod to run sanity_check() once per second.
        check_every = 1.0

        # Last sub-process PID we observed, and when we first observed it.
        pid = None
        since = 0.0

        def sanity_check(self, pid):
            #
            # - simply use the provided process ID to start counting time
            # - this is a cheap way to measure the sub-process up-time
            #
            if pid != self.pid:
                # New (or restarted) sub-process: restart the clock.
                self.pid = pid
                self.since = time()

            lapse = (time() - self.since) / 60.0

            return {'uptime': '%.2f minutes (pid %s)' % (lapse, pid)}

        def configure(self, _):
            #
            # - just go to sleep, the point is not to run anything meaningful
            #
            return 'sleep 3600', {}

    #
    # - if you run this script locally and curl http://localhost:8080/info you will see the metrics.
    # - simply type CTRL-C to exit
    #
    Pod().boot(Strategy, local=1)
9b2b7a2459d52aa35ec7f50778e22a53dee314ab | 9,627 | py | Python | doc/conf.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Sphinx-Gallery documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 16:01:26 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import date
import sphinxgallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinxgallery.gen_gallery',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sphinx-Gallery'
copyright = u'2014-%s, Óscar Nájera' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = sphinxgallery.__version__
# The full version, including alpha/beta/rc tags.
release = sphinxgallery.__version__ + '-git'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
def setup(app):
    """Sphinx extension hook: layer our CSS tweaks over the chosen theme."""
    # NOTE(review): add_stylesheet() was renamed add_css_file() in Sphinx 1.8;
    # fine for the Sphinx versions this config targets — confirm before upgrading.
    app.add_stylesheet('theme_override.css')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinx-Gallerydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Sphinx-Gallery.tex', u'Sphinx-Gallery Documentation',
u'Óscar Nájera', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinx-gallery', u'Sphinx-Gallery Documentation',
[u'Óscar Nájera'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Sphinx-Gallery', u'Sphinx-Gallery Documentation',
u'Óscar Nájera', 'Sphinx-Gallery', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Directories scanned for gallery example scripts (relative to this conf.py),
# and the corresponding output directories the generated galleries go into.
examples_dirs = ['../examples', '../tutorials']
gallery_dirs = ['auto_examples', 'tutorials']

try:
    # Run the mayavi examples and find the mayavi figures if mayavi is
    # installed
    from mayavi import mlab
    find_mayavi_figures = True
    examples_dirs.append('../mayavi_examples')
    gallery_dirs.append('auto_mayavi_examples')
    # Do not pop up any mayavi windows while running the
    # examples. These are very annoying since they steal the focus.
    mlab.options.offscreen = True
except ImportError:
    # mayavi is optional: without it the mayavi gallery is simply skipped.
    find_mayavi_figures = False

# Configuration consumed by sphinxgallery.gen_gallery (see `extensions` above).
sphinxgallery_conf = {
    'doc_module': ('sphinxgallery', 'numpy'),
    # Where identifiers found in example code are linked to (None = local docs).
    'reference_url': {
        'sphinxgallery': None,
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
    'examples_dirs': examples_dirs,
    'gallery_dirs': gallery_dirs,
    'find_mayavi_figures': find_mayavi_figures,
}
| 31.667763 | 79 | 0.716423 |
64b7fc08603a2bd5941862858a8836e7efeb07bd | 2,646 | py | Python | project_files/retention_model.py | nmlook/EDS-Project-3-Workbook-Bundle | 847d0ebe1beae75148bb25aa6b16835efda05a96 | [
"MIT"
] | null | null | null | project_files/retention_model.py | nmlook/EDS-Project-3-Workbook-Bundle | 847d0ebe1beae75148bb25aa6b16835efda05a96 | [
"MIT"
] | null | null | null | project_files/retention_model.py | nmlook/EDS-Project-3-Workbook-Bundle | 847d0ebe1beae75148bb25aa6b16835efda05a96 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import pandas as pd
# pd.options.mode.chained_assignment = None # default='warn'
import pickle
import sklearn
import sys
import fire
class EmployeeRetentionModel:
    """Wraps a pickled classifier together with the data-cleaning and
    feature-engineering steps it was trained with.
    """

    def __init__(self, model_location):
        """Load the pickled model from ``model_location``."""
        with open(model_location, 'rb') as f:
            self.model = pickle.load(f)

    def predict_proba(self, X_new, clean=True, augment=True):
        """Return ``(prepared_df, class_probabilities)`` for new observations.

        ``clean``/``augment`` toggle the preprocessing steps, for callers
        that have already prepared the data themselves.
        """
        if clean:
            X_new = self.clean_data(X_new)
        if augment:
            X_new = self.engineer_features(X_new)
        return X_new, self.model.predict_proba(X_new)

    def clean_data(self, df):
        """Return a cleaned copy of ``df``; the input is left untouched."""
        # Exact duplicate rows carry no extra information.
        df = df.drop_duplicates()
        # Temporary workers are out of scope for retention modelling.
        df = df[df.department != 'temp'].copy()
        # Absent complaint / promotion flags mean "no".
        df['filed_complaint'] = df['filed_complaint'].fillna(0)
        df['recently_promoted'] = df['recently_promoted'].fillna(0)
        # Normalize the long department label, then bucket unknowns.
        df['department'] = (df['department']
                            .replace('information_technology', 'IT')
                            .fillna('Missing'))
        # Flag missing evaluations *before* imputing them with 0.
        df['last_evaluation_missing'] = df['last_evaluation'].isnull().astype(int)
        df['last_evaluation'] = df['last_evaluation'].fillna(0)
        return df

    def engineer_features(self, df):
        """Add indicator features and one-hot encode the categoricals."""
        low_eval = df.last_evaluation < 0.6
        has_eval = df.last_evaluation_missing == 0
        df['underperformer'] = (low_eval & has_eval).astype(int)
        df['unhappy'] = (df.satisfaction < 0.2).astype(int)
        df['overachiever'] = ((df.last_evaluation > 0.8)
                              & (df.satisfaction > 0.7)).astype(int)
        return pd.get_dummies(df, columns=['department', 'salary'])
def main(data_location, output_location, model_location, clean=True, augment=True):
    """Batch-score a CSV of employees and write it back out with a
    'prediction' column (probability of the positive class).

    ``clean``/``augment`` are forwarded to
    EmployeeRetentionModel.predict_proba().
    """
    # Read dataset
    df = pd.read_csv(data_location)

    # Initialize model
    retention_model = EmployeeRetentionModel(model_location)

    # Make prediction. BUGFIX: clean/augment were previously accepted by
    # main() but silently ignored — they are now passed through.
    df, pred = retention_model.predict_proba(df, clean=clean, augment=augment)
    # Keep only the probability of the positive class.
    pred = [p[1] for p in pred]

    # Add prediction to dataset
    df['prediction'] = pred

    # Save dataset after making predictions
    df.to_csv(output_location, index=None)

if __name__ == '__main__':
    # NOTE(review): argv values are strings, so a CLI "False" for clean/augment
    # is still truthy; the unused `fire` import at the top of the file suggests
    # fire.Fire(main) was the intended CLI wrapper — confirm before relying on
    # these flags from the command line.
    main(*sys.argv[1:])
4636b95da3ad075e55f245edd69bd8170f288aa1 | 897 | py | Python | dimmer.py | jeffkub/led-wall-clock | 12f39779e0508fab5d6fc835adbd2cf2b4014228 | [
"MIT"
] | 11 | 2016-12-05T23:34:55.000Z | 2021-01-11T19:32:33.000Z | dimmer.py | jeffkub/led-wall-clock | 12f39779e0508fab5d6fc835adbd2cf2b4014228 | [
"MIT"
] | 3 | 2016-03-24T02:49:12.000Z | 2018-06-19T07:05:33.000Z | dimmer.py | jeffkub/led-wall-clock | 12f39779e0508fab5d6fc835adbd2cf2b4014228 | [
"MIT"
] | 5 | 2016-12-06T20:43:11.000Z | 2021-11-14T15:34:07.000Z | import ephem
import logging
class Dimmer(object):
def __init__(self, scheduler):
self._observer = ephem.Observer()
self._observer.pressure = 0
self._observer.horizon = '-6'
self._observer.lat = '38.262469'
self._observer.lon = '-85.648625'
self.brightness = 100
self.update()
# Run every 5 minutes
scheduler.add_job(self.update, 'cron', minute='*/5')
def update(self):
self._observer.date = ephem.now()
morning = self._observer.next_rising(ephem.Sun(), use_center=True)
night = self._observer.next_setting(ephem.Sun(), use_center=True)
if morning < night:
# Morning is sooner, so it must be night
logging.info("It is night time")
self.brightness = 10
else:
logging.info("It is day time")
self.brightness = 25
| 27.181818 | 74 | 0.591973 |
b18ce25fd218201ab75f4b3c14f9c7b66f84f373 | 10,443 | py | Python | sdk/python/arvados/api.py | basharbme/arvados | 1c3c8f7fd2e1268b139e046fbd6a7093dd82222f | [
"Apache-2.0"
] | 1 | 2019-09-08T01:49:09.000Z | 2019-09-08T01:49:09.000Z | sdk/python/arvados/api.py | basharbme/arvados | 1c3c8f7fd2e1268b139e046fbd6a7093dd82222f | [
"Apache-2.0"
] | null | null | null | sdk/python/arvados/api.py | basharbme/arvados | 1c3c8f7fd2e1268b139e046fbd6a7093dd82222f | [
"Apache-2.0"
] | null | null | null | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
import collections
import http.client
import httplib2
import json
import logging
import os
import re
import socket
import time
import types
import apiclient
from apiclient import discovery as apiclient_discovery
from apiclient import errors as apiclient_errors
from . import config
from . import errors
from . import util
from . import cache
_logger = logging.getLogger('arvados.api')

# Tuning knobs for the patched httplib2 request wrapper (see
# _intercept_http_request / _patch_http_request below).
MAX_IDLE_CONNECTION_DURATION = 30   # seconds of idleness before cached connections are dropped
RETRY_DELAY_INITIAL = 2             # seconds before the first retry
RETRY_DELAY_BACKOFF = 2             # multiplier applied to the delay after each retry
RETRY_COUNT = 2                     # retries attempted before the final request
class OrderedJsonModel(apiclient.model.JsonModel):
    """Model class for JSON that preserves the contents' order.

    API clients that care about preserving the order of fields in API
    server responses can use this model to do so, like this::

        from arvados.api import OrderedJsonModel
        client = arvados.api('v1', ..., model=OrderedJsonModel())
    """

    def deserialize(self, content):
        # This is a very slightly modified version of the parent class'
        # implementation. Copyright (c) 2010 Google.
        # The only change: object_pairs_hook=OrderedDict makes json.loads
        # keep the server response's key order.

        content = content.decode('utf-8')
        body = json.loads(content, object_pairs_hook=collections.OrderedDict)
        # Unwrap the {'data': ...} envelope when the model is configured to.
        if self._data_wrapper and isinstance(body, dict) and 'data' in body:
            body = body['data']
        return body
def _intercept_http_request(self, uri, method="GET", headers=None, **kwargs):
    """Replacement for httplib2.Http.request (installed by
    _patch_http_request) that enforces the published request-size limit,
    adds auth / request-id headers, and retries idempotent requests.

    BUGFIX: ``headers`` previously defaulted to a shared mutable dict
    (``{}``), which this function mutates — so the X-Request-Id generated on
    the first default-header call persisted and was silently reused by every
    later call that also omitted ``headers``.
    """
    if headers is None:
        headers = {}
    if (self.max_request_size and
        kwargs.get('body') and
        self.max_request_size < len(kwargs['body'])):
        raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))

    if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
        headers['X-External-Client'] = '1'

    headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
    if not headers.get('X-Request-Id'):
        headers['X-Request-Id'] = self._request_id()

    # Only retry methods that are safe to repeat.
    retryable = method in [
        'DELETE', 'GET', 'HEAD', 'OPTIONS', 'PUT']
    retry_count = self._retry_count if retryable else 0

    if (not retryable and
        time.time() - self._last_request_time > self._max_keepalive_idle):
        # High probability of failure due to connection atrophy. Make
        # sure this request [re]opens a new connection by closing and
        # forgetting all cached connections first.
        for conn in self.connections.values():
            conn.close()
        self.connections.clear()

    delay = self._retry_delay_initial
    for _ in range(retry_count):
        self._last_request_time = time.time()
        try:
            return self.orig_http_request(uri, method, headers=headers, **kwargs)
        except http.client.HTTPException:
            _logger.debug("Retrying API request in %d s after HTTP error",
                          delay, exc_info=True)
        except socket.error:
            # This is the one case where httplib2 doesn't close the
            # underlying connection first. Close all open
            # connections, expecting this object only has the one
            # connection to the API server. This is safe because
            # httplib2 reopens connections when needed.
            _logger.debug("Retrying API request in %d s after socket error",
                          delay, exc_info=True)
            for conn in self.connections.values():
                conn.close()
        except httplib2.SSLHandshakeError as e:
            # Intercept and re-raise with a better error message.
            # NOTE(review): SSLHandshakeError was removed in newer httplib2
            # releases — confirm the pinned httplib2 version still has it.
            raise httplib2.SSLHandshakeError("Could not connect to %s\n%s\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority." % (uri, e))
        time.sleep(delay)
        delay = delay * self._retry_delay_backoff

    # Final attempt (or the only attempt, for non-retryable methods):
    # let any exception propagate to the caller.
    self._last_request_time = time.time()
    return self.orig_http_request(uri, method, headers=headers, **kwargs)
def _patch_http_request(http, api_token):
    # Monkey-patch an httplib2.Http instance so every request goes through
    # _intercept_http_request (auth header, request id, size limit, retries),
    # and attach the state that wrapper reads.
    http.arvados_api_token = api_token
    http.max_request_size = 0   # 0 = no limit until the discovery doc supplies one (see api())
    http.orig_http_request = http.request
    http.request = types.MethodType(_intercept_http_request, http)
    http._last_request_time = 0
    http._max_keepalive_idle = MAX_IDLE_CONNECTION_DURATION
    http._retry_delay_initial = RETRY_DELAY_INITIAL
    http._retry_delay_backoff = RETRY_DELAY_BACKOFF
    http._retry_count = RETRY_COUNT
    http._request_id = util.new_request_id
    return http
# Monkey patch discovery._cast() so objects and arrays get serialized
# with json.dumps() instead of str().
_cast_orig = apiclient_discovery._cast   # keep a handle on the original for fallthrough
def _cast_objects_too(value, schema_type):
global _cast_orig
if (type(value) != type('') and
type(value) != type(b'') and
(schema_type == 'object' or schema_type == 'array')):
return json.dumps(value)
else:
return _cast_orig(value, schema_type)
apiclient_discovery._cast = _cast_objects_too

# Convert apiclient's HttpErrors into our own API error subclass for better
# error reporting.
# Reassigning apiclient_errors.HttpError is not sufficient because most of the
# apiclient submodules import the class into their own namespace.
def _new_http_error(cls, *args, **kwargs):
    # Redirect construction of any HttpError to our ApiError subclass.
    return super(apiclient_errors.HttpError, cls).__new__(
        errors.ApiError, *args, **kwargs)
apiclient_errors.HttpError.__new__ = staticmethod(_new_http_error)
def http_cache(data_type):
    """Return a SafeHTTPCache rooted at ~/.cache/arvados/<data_type>, or
    None if no home directory is set or the directory cannot be created.

    Entries are kept for at most two days.
    """
    homedir = os.environ.get('HOME')
    if not homedir:
        # Covers both an unset HOME and an empty string (the original code
        # checked len() == 0 separately).
        return None
    # Use os.path.join instead of string concatenation for the cache path.
    path = os.path.join(homedir, '.cache', 'arvados', data_type)
    try:
        util.mkdir_dash_p(path)
    except OSError:
        return None
    return cache.SafeHTTPCache(path, max_age=60*60*24*2)
def api(version=None, cache=True, host=None, token=None, insecure=False,
        request_id=None, timeout=5*60, **kwargs):
    """Return an apiclient Resources object for an Arvados instance.

    :version:
      A string naming the version of the Arvados API to use (for
      example, 'v1').

    :cache:
      Use a cache (~/.cache/arvados/discovery) for the discovery
      document.

    :host:
      The Arvados API server host (and optional :port) to connect to.

    :token:
      The authentication token to send with each API call.

    :insecure:
      If True, ignore SSL certificate validation errors.

    :timeout:
      A timeout value for http requests.

    :request_id:
      Default X-Request-Id header value for outgoing requests that
      don't already provide one. If None or omitted, generate a random
      ID. When retrying failed requests, the same ID is used on all
      attempts.

    Additional keyword arguments will be passed directly to
    `apiclient_discovery.build` if a new Resource object is created.
    If the `discoveryServiceUrl` or `http` keyword arguments are
    missing, this function will set default values for them, based on
    the current Arvados configuration settings.
    """

    if not version:
        version = 'v1'
        _logger.info("Using default API version. " +
                     "Call arvados.api('%s') instead." %
                     version)
    # Exactly one way of locating the server must be in effect: an explicit
    # discoveryServiceUrl (with token), a host+token pair, or neither (fall
    # back to the local Arvados configuration via api_from_config).
    if 'discoveryServiceUrl' in kwargs:
        if host:
            raise ValueError("both discoveryServiceUrl and host provided")
        # Here we can't use a token from environment, config file,
        # etc. Those probably have nothing to do with the host
        # provided by the caller.
        if not token:
            raise ValueError("discoveryServiceUrl provided, but token missing")
    elif host and token:
        pass
    elif not host and not token:
        return api_from_config(
            version=version, cache=cache, request_id=request_id, **kwargs)
    else:
        # Caller provided one but not the other
        if not host:
            raise ValueError("token argument provided, but host missing.")
        else:
            raise ValueError("host argument provided, but token missing.")

    if host:
        # Caller wants us to build the discoveryServiceUrl
        kwargs['discoveryServiceUrl'] = (
            'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,))

    if 'http' not in kwargs:
        # Build an httplib2 client honoring the cache/insecure options.
        http_kwargs = {'ca_certs': util.ca_certs_path()}
        if cache:
            http_kwargs['cache'] = http_cache('discovery')
        if insecure:
            http_kwargs['disable_ssl_certificate_validation'] = True
        kwargs['http'] = httplib2.Http(**http_kwargs)
    if kwargs['http'].timeout is None:
        kwargs['http'].timeout = timeout
    # Install the auth/retry wrapper (see _patch_http_request above).
    kwargs['http'] = _patch_http_request(kwargs['http'], token)

    svc = apiclient_discovery.build('arvados', version, cache_discovery=False, **kwargs)
    svc.api_token = token
    svc.insecure = insecure
    svc.request_id = request_id
    # Now that the discovery document is loaded, enforce its request-size
    # limit and stop using the discovery cache for regular API calls.
    kwargs['http'].max_request_size = svc._rootDesc.get('maxRequestSize', 0)
    kwargs['http'].cache = None
    kwargs['http']._request_id = lambda: svc.request_id or util.new_request_id()
    return svc
def api_from_config(version=None, apiconfig=None, **kwargs):
    """Return an apiclient Resources object enabling access to an Arvados server
    instance.

    :version:
      A string naming the version of the Arvados REST API to use (for
      example, 'v1').

    :apiconfig:
      If provided, this should be a dict-like object (must support the get()
      method) with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and
      optionally ARVADOS_API_HOST_INSECURE. If not provided, use
      arvados.config (which gets these parameters from the environment by
      default.)

    Other keyword arguments such as `cache` will be passed along `api()`
    """
    # Load from user configuration or environment
    if apiconfig is None:
        apiconfig = config.settings()

    # Fail with a single message naming every missing setting.
    # (Renamed from `errors`, which shadowed the arvados.errors module
    # imported at the top of this file.)
    missing = [key for key in ('ARVADOS_API_HOST', 'ARVADOS_API_TOKEN')
               if key not in apiconfig]
    if missing:
        raise ValueError(" and ".join(missing)+" not set.\nPlease set in %s or export environment variable." % config.default_config_file)

    host = apiconfig.get('ARVADOS_API_HOST')
    token = apiconfig.get('ARVADOS_API_TOKEN')
    insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)

    return api(version=version, host=host, token=token, insecure=insecure, **kwargs)
| 37.564748 | 193 | 0.682754 |
e0137ef4822c0fcfd6e7a7c29b75c617b72e7532 | 297 | py | Python | src/demo_tzbtc/types/tzbtc/storage.py | r4ravi2008/dipdup-py | 1be3c3ac705f687af058ff51cde7970878584562 | [
"MIT"
] | null | null | null | src/demo_tzbtc/types/tzbtc/storage.py | r4ravi2008/dipdup-py | 1be3c3ac705f687af058ff51cde7970878584562 | [
"MIT"
] | null | null | null | src/demo_tzbtc/types/tzbtc/storage.py | r4ravi2008/dipdup-py | 1be3c3ac705f687af058ff51cde7970878584562 | [
"MIT"
] | null | null | null | # generated by datamodel-codegen:
# filename: storage.json
from __future__ import annotations
from typing import Dict
from pydantic import BaseModel, Field
class TzbtcStorage(BaseModel):
    # Generated (datamodel-codegen) pydantic model of the tzBTC contract's
    # top-level storage; field names mirror the Michelson type names in
    # storage.json.  `lambda` is a Python keyword, hence the trailing
    # underscore plus an alias; `bool` intentionally shadows the builtin
    # because that is the field's name in the schema.
    big_map: Dict[str, str]
    bool: bool
    lambda_: str = Field(..., alias='lambda')
    nat: str
| 18.5625 | 45 | 0.713805 |
7b9faf2d3dbef9e6e589ddeab6de0904ee532ae2 | 3,923 | py | Python | mods/Roomba/client.py | thermalpilot/opennero | 1bb1ba083cf2576e09bb7cfeac013d6940a47afe | [
"BSD-3-Clause"
] | 1 | 2015-06-21T00:52:25.000Z | 2015-06-21T00:52:25.000Z | mods/Roomba/client.py | thermalpilot/opennero | 1bb1ba083cf2576e09bb7cfeac013d6940a47afe | [
"BSD-3-Clause"
] | null | null | null | mods/Roomba/client.py | thermalpilot/opennero | 1bb1ba083cf2576e09bb7cfeac013d6940a47afe | [
"BSD-3-Clause"
] | null | null | null | from OpenNero import *
from random import seed
# add the key and mouse bindings
from inputConfig import *
# add network utils
from common import *
from module import getMod, delMod
### called from gui elements ############################
def toggle_ai_callback(pauseButton):
""" pause and resume all AI agents """
toggle_ai()
if pauseButton.text == 'Pause!':
disable_ai()
pauseButton.text = 'Resume!'
else:
pauseButton.text = 'Pause!'
reset_ai()
def toggle_bot_type(changeBotButton, botTypeBox):
if botTypeBox.text.lower().find('script') >= 0:
botTypeBox.text = 'rtNEAT'
changeBotButton.text = 'Switch to Script'
else:
botTypeBox.text = 'Script'
changeBotButton.text = 'Switch to rtNEAT'
def remove_bots_closure(removeBotsButton, addBotsButton):
def closure():
removeBotsButton.enabled = False
addBotsButton.enabled = True
getMod().remove_bots()
return closure
def add_bots_closure(removeBotsButton, addBotsButton, botTypeBox, numBotBox):
def closure():
removeBotsButton.enabled = True
addBotsButton.enabled = False
getMod().add_bots(botTypeBox.text, numBotBox.text)
return closure
def CreateGui(guiMan):
window_width = 250 # width
guiMan.setTransparency(1.0)
guiMan.setFont("data/gui/fonthaettenschweiler.bmp")
botTypeBox = gui.create_edit_box(guiMan, 'botType', Pos2i(10,10), Pos2i(110,30), 'Script')
numBotBox = gui.create_edit_box(guiMan, 'numBot', Pos2i(130,10), Pos2i(40,30), '5')
addBotButton = gui.create_button(guiMan, 'addBot', Pos2i(180,10), Pos2i(60,30), '')
addBotButton.text = "Add bots"
changeBotButton = gui.create_button(guiMan, 'changeBot', Pos2i(10,50), Pos2i(230,30), '')
changeBotButton.text = "Switch to rtNEAT"
changeBotButton.OnMouseLeftClick = lambda:toggle_bot_type(changeBotButton, botTypeBox)
w = (window_width - 40) / 3
pauseButton = gui.create_button( guiMan, 'pause', Pos2i(10,90), Pos2i(w,30), '' )
pauseButton.text = 'Pause!'
pauseButton.OnMouseLeftClick = lambda:toggle_ai_callback(pauseButton)
removeBotButton = gui.create_button(guiMan, 'cleanBot', Pos2i(10 + (w + 10),90), Pos2i(w,30), '')
removeBotButton.text = "Remove bots"
exitButton = gui.create_button(guiMan, 'exit', Pos2i(10 + 2 * (w + 10),90), Pos2i(w,30), '')
exitButton.text = "Exit"
exitButton.OnMouseLeftClick = lambda: switchToHub()
addBotButton.OnMouseLeftClick = add_bots_closure(removeBotButton, addBotButton, botTypeBox, numBotBox)
removeBotButton.OnMouseLeftClick = remove_bots_closure(removeBotButton, addBotButton)
addBotButton.enabled = True
removeBotButton.enabled = False
AiWindow = gui.create_window( guiMan, 'AiWindow', Pos2i(530,20), Pos2i(window_width,150), 'AI Controls' )
AiWindow.addChild(botTypeBox)
AiWindow.addChild(numBotBox)
AiWindow.addChild(changeBotButton)
AiWindow.addChild(addBotButton)
AiWindow.addChild(pauseButton)
AiWindow.addChild(removeBotButton)
AiWindow.addChild(exitButton)
def ClientMain():
# disable physics and AI updates at first
# disable_physics()
disable_ai()
# initialize random number generator with current time
seed()
# add a camera
camRotateSpeed = 100
camMoveSpeed = 1500
camZoomSpeed = 100
cam = getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
cam.setPosition(Vector3f(100, 100, 50))
cam.setTarget(Vector3f(1, 1, 1))
cam.setFarPlane(1000)
cam.setEdgeScroll(False)
getMod().setup_sandbox()
# add a light source
getSimContext().addLightSource(Vector3f(500,-500,1000), 1500)
# create the io map
getSimContext().setInputMapping(createInputMapping())
# setup the gui
CreateGui(getSimContext().getGuiManager())
| 33.245763 | 109 | 0.685445 |
b6f2bfd4c53dc7736e98c551cf894305b999c9d2 | 2,996 | py | Python | moto/cloudformation/custom_model.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | moto/cloudformation/custom_model.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | moto/cloudformation/custom_model.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | import json
import threading
from moto import settings
from moto.core.models import CloudFormationModel
from moto.awslambda import lambda_backends
from uuid import uuid4
class CustomModel(CloudFormationModel):
    """In-memory representation of a CloudFormation custom resource whose
    creation is performed by invoking a backing Lambda function.
    """

    def __init__(self, region_name, request_id, logical_id, resource_name):
        self.region_name = region_name
        self.request_id = request_id
        self.logical_id = logical_id
        self.resource_name = resource_name
        # Attributes returned by the backing Lambda; filled in by set_data()
        # once the cfnresponse callback arrives.
        self.data = dict()
        self._finished = False

    def set_data(self, data):
        # Called when the Lambda posts its result back; marks creation done.
        self.data = data
        self._finished = True

    def is_created(self):
        return self._finished

    @property
    def physical_resource_id(self):
        return self.resource_name

    @staticmethod
    def cloudformation_type():
        # NOTE(review): placeholder — custom resources don't appear to map to
        # a single fixed CloudFormation type string here.
        return "?"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name, **kwargs
    ):
        """Start creating the resource by invoking the backing Lambda
        asynchronously with a CloudFormation 'Create' event.

        Returns immediately; the resource only becomes complete once the
        Lambda posts back to ResponseURL (delivered via set_data()).
        """
        logical_id = kwargs["LogicalId"]
        stack_id = kwargs["StackId"]
        resource_type = kwargs["ResourceType"]

        properties = cloudformation_json["Properties"]
        service_token = properties["ServiceToken"]

        backend = lambda_backends[region_name]
        fn = backend.get_function(service_token)

        request_id = str(uuid4())

        custom_resource = CustomModel(
            region_name, request_id, logical_id, resource_name
        )

        # Register with the stack so the response callback can find us.
        # (Imported locally — presumably to avoid a circular import.)
        from moto.cloudformation import cloudformation_backends

        stack = cloudformation_backends[region_name].get_stack(stack_id)
        stack.add_custom_resource(custom_resource)

        event = {
            "RequestType": "Create",
            "ServiceToken": service_token,
            # A request will be send to this URL to indicate success/failure
            # This request will be coming from inside a Docker container
            # Note that, in order to reach the Moto host, the Moto-server should be listening on 0.0.0.0
            #
            # Alternative: Maybe we should let the user pass in a container-name where Moto is running?
            # Similar to how we know for sure that the container in our CI is called 'motoserver'
            "ResponseURL": f"{settings.moto_server_host()}/cloudformation_{region_name}/cfnresponse?stack={stack_id}",
            "StackId": stack_id,
            "RequestId": request_id,
            "LogicalResourceId": logical_id,
            "ResourceType": resource_type,
            "ResourceProperties": properties,
        }

        # Fire-and-forget: invoke the Lambda on a worker thread so resource
        # creation does not block the CloudFormation request.
        invoke_thread = threading.Thread(
            target=fn.invoke, args=(json.dumps(event), {}, {})
        )
        invoke_thread.start()

        return custom_resource

    @classmethod
    def has_cfn_attr(cls, attribute):
        # We don't know which attributes are supported for third-party resources
        return True

    def get_cfn_attribute(self, attribute_name):
        if attribute_name in self.data:
            return self.data[attribute_name]
        return None
88057af06683f764c46e3a00daa8828915518fdf | 3,803 | py | Python | models/subscription.py | tervay/the-blue-alliance | e14c15cb04b455f90a2fcfdf4c1cdbf8454e17f8 | [
"MIT"
] | 1 | 2016-03-19T20:29:35.000Z | 2016-03-19T20:29:35.000Z | models/subscription.py | gregmarra/the-blue-alliance | 5bedaf5c80b4623984760d3da3289640639112f9 | [
"MIT"
] | 11 | 2020-10-10T03:05:29.000Z | 2022-02-27T09:57:22.000Z | models/subscription.py | gregmarra/the-blue-alliance | 5bedaf5c80b4623984760d3da3289640639112f9 | [
"MIT"
] | null | null | null | import json
from google.appengine.ext import ndb
from consts.model_type import ModelType
from consts.notification_type import NotificationType
class Subscription(ndb.Model):
    """A user's push-notification subscription for a single model key.

    In order to make strongly consistent DB requests, instances of this class
    should be created with a parent that is the associated Account key.
    """
    user_id = ndb.StringProperty(required=True)
    model_key = ndb.StringProperty(required=True)
    model_type = ndb.IntegerProperty(required=True)
    notification_types = ndb.IntegerProperty(repeated=True)
    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)

    def __init__(self, *args, **kwargs):
        self._settings = None
        super(Subscription, self).__init__(*args, **kwargs)

    @property
    def notification_names(self):
        """Human-readable names for the subscribed notification types."""
        return [NotificationType.render_names[notification_type]
                for notification_type in self.notification_types]

    @classmethod
    def users_subscribed_to_event(cls, event, notification_type):
        """Return user IDs subscribed to an Event (or its year wildcard).

        Matches subscriptions whose model_key is either the event key itself
        (e.g. ``2020miket``) or the year wildcard (e.g. ``2020*``), with the
        given notification type.

        Args:
            event (models.event.Event): The Event to query Subscription for.
            notification_type (consts.notification_type.NotificationType): A NotificationType for the Subscription.

        Returns:
            list (string): Deduplicated user IDs with matching Subscriptions.
        """
        subscriptions = Subscription.query(
            Subscription.model_key.IN([event.key_name, "{}*".format(event.year)]),
            Subscription.notification_types == notification_type,
            Subscription.model_type == ModelType.EVENT,
            projection=[Subscription.user_id]
        ).fetch()
        return list({subscription.user_id for subscription in subscriptions})

    @classmethod
    def users_subscribed_to_team(cls, team, notification_type):
        """Return user IDs subscribed to a Team and a notification type.

        Ex: model_key == ``frc7332`` and the given notification type.

        Args:
            team (models.team.Team): The Team to query Subscription for.
            notification_type (consts.notification_type.NotificationType): A NotificationType for the Subscription.

        Returns:
            list (string): Deduplicated user IDs with matching Subscriptions.
        """
        subscriptions = Subscription.query(
            Subscription.model_key == team.key_name,
            Subscription.notification_types == notification_type,
            Subscription.model_type == ModelType.TEAM,
            projection=[Subscription.user_id]
        ).fetch()
        return list({subscription.user_id for subscription in subscriptions})

    @classmethod
    def users_subscribed_to_match(cls, match, notification_type):
        """Return user IDs subscribed to a Match and a notification type.

        Ex: model_key == ``2020miket_qm1`` and the given notification type.

        Args:
            match (models.match.Match): The Match to query Subscription for.
            notification_type (consts.notification_type.NotificationType): A NotificationType for the Subscription.

        Returns:
            list (string): Deduplicated user IDs with matching Subscriptions.
        """
        subscriptions = Subscription.query(
            Subscription.model_key == match.key_name,
            Subscription.notification_types == notification_type,
            Subscription.model_type == ModelType.MATCH,
            projection=[Subscription.user_id]
        ).fetch()
        return list({subscription.user_id for subscription in subscriptions})
58b9871a773f70871f5f17ba35de3a54a97270a7 | 14,395 | py | Python | code/snp_network_from_gene_network.py | chagaz/sfan | 14f1b336caa80bc176ec9459019f309ccd92252d | [
"MIT"
] | 12 | 2016-03-07T09:14:38.000Z | 2020-05-18T07:45:58.000Z | code/snp_network_from_gene_network.py | chagaz/sfan | 14f1b336caa80bc176ec9459019f309ccd92252d | [
"MIT"
] | 12 | 2016-07-29T09:38:18.000Z | 2016-07-29T15:22:04.000Z | code/snp_network_from_gene_network.py | chagaz/sfan | 14f1b336caa80bc176ec9459019f309ccd92252d | [
"MIT"
] | 3 | 2017-08-09T23:12:42.000Z | 2020-05-16T18:12:44.000Z | # snp_network_from_gene_network.py -- Create SNPs network
#
# jean-daniel.granet@mines-paristech.fr
from __future__ import print_function
import sys
import argparse
import time
from operator import itemgetter
import numpy as np # numerical python module
import scipy.sparse as sp # scientific python sparse module
from sympy import Interval, Union
def main():
TotalStartTime = time.time()
#---------------------------------------------------------------------------
# /!\ in files, chromo numerotation begin to 1
chromo_num_correspondance = {str(i): i for i in xrange(1, 23) }
chromo_num_correspondance['X'] = 23
chromo_num_correspondance['Y'] = 24
chromo_num_correspondance['XY'] = 25 # pseudo autosomal region'
chromo_num_correspondance['MT'] = 26
# no unlocalized sequences, unplaced sequences, alternate loci
# TODO : but they can be added here if needeed
#---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Create SNPs network')
parser.add_argument('hugo_ppi', help='gene network in .SIF format')
parser.add_argument('snps_list', help='SNPs positions in .MAP format')
parser.add_argument('genes_list', help='gene positions. tab separated fields are : <Chromosome Name> <Gene Start (bp)> <Gene End (bp)> <HGNC symbol>')
parser.add_argument('window', help='window for SNP-gene association', type = int)
parser.add_argument('output', help='output file : snp network in .DIMACS format')
args = parser.parse_args()
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# create a list wich contains SNPs positions. Using PLINK's internal numeric coding for chromosome number
print ('Creation of the list of SNPS positions : ', end="")
Start = time.time()
SNPs = list()
with open(args.snps_list, 'r') as fdMap:
# Map file structure description : http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#map
for (line_idx, line) in enumerate(fdMap):
if line_idx > 0: # avoid header of map file
line_split = line.split()
if int(line_split[3]) >= 0: # "To exclude a SNP from analysis, set the 4th column (physical base-pair position) to any negative value" -- http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#map
if line_split[0] == 'X': # X chromosome
SNPs.append((23, int(line_split[3])))
elif line_split[0] == 'Y': # Y chromosome
SNPs.append((24, int(line_split[3])))
elif line_split[0] == 'XY': # Pseudo-autosomal region of X
SNPs.append((25, int(line_split[3])))
elif line_split[0] == 'MT': # Mitochondrial
SNPs.append((26, int(line_split[3])))
else:
SNPs.append((int(line_split[0]), int(line_split[3])))
fdMap.close()
End = time.time()
print ('\033[92m' + 'DONE' + '\033[0m')
print ('Exec time :' + str(End - Start))
#---------------------------------------------------------------------------
# sort the SNPs array by chromosome
SNPs.sort(key=itemgetter(0, 1))
# create a sparse matrix of size len(SNPs) x len(SNPs) to create the network
print ('Creation of the matrix to save the network : ', end ="")
net = sp.lil_matrix((len(SNPs), len(SNPs)))
# Before add net [snp1, snp2] = 1, always check that net [snp2, snp1] is not already 1.
# It can be the case for example if :
# _______________|__________|___________________ genomique sequence
# [ snp1 snp2 ]
# <------------------------------>
# gene A
# because snp1 and snp2 are neighbor and they in the same gene
# Other example : neighbor, and belonging to different gene that interacting
print ('\033[92m' + 'DONE' + '\033[0m')
#---------------------------------------------------------------------------
# connect each SNPs to the nearest
print ('Connect each SNPs to the nearest : ', end ="")
Start = time.time()
Chromosome = [(-1,-1)] * 26 # 26 = 22 pairs of autosomes (1-22) + 3 gonosomes : X (23), Y (24), XY (25) + 1 mitochondrial : MT (26)
# This list saves, for each chromo, which SNP is at the begining, which one is at the end
for idxSNP in xrange(0, len(SNPs)):
# handle first read SNPs that can not be on the first Chromo
if idxSNP == 0 :
Chromosome[SNPs[idxSNP][0] -1 ] = (0, -1) #/!\ -1 needed because in Python, chromo numeratation begin to 0, not 1
# Can't do Chromosome[SNPs[idxSNP][0][0] because tuple do not support item assignment
if idxSNP + 1 < len(SNPs): # the current SNP is not the last. +1 is needed because numerotation begin to 0
if SNPs[idxSNP][0] != SNPs[idxSNP + 1][0]: # the current SNP and the next one are not on the same chromosome
# save the current SNP_index as the last SNP of the current chromo :
Chromosome[SNPs[idxSNP][0] - 1] = (Chromosome[SNPs[idxSNP][0] - 1][0], idxSNP) # /!\ -1 needed because in Python, chromo numeratation begin to 0, not 1
# save the next SNP index as the first of the next chromo :
Chromosome[SNPs[idxSNP][0]] = (idxSNP + 1, 0)
if SNPs[idxSNP][0] == SNPs[idxSNP + 1][0]: # the current SNP and the next one are on the same chromosome -> connect them
net[idxSNP, idxSNP + 1] = 1
else: # the current studied SNP is the last one
Chromosome[SNPs[idxSNP][0] - 1] = (Chromosome[SNPs[idxSNP][0] - 1][0], idxSNP)
print ('\033[92m' + 'DONE' + '\033[0m' )
End = time.time()
print ('Exec time :' + str(End - Start))
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# read hugogenes.txt and save gene duplicates of each Hgs of the file in
# into a dictionnary :
print ('Save the genes positions, taking the window into account : ', end="")
Start = time.time()
genes = dict()
# key : hugo gene symbol
# values : tuples containing 2 items :
# - the first = chromo num
# - the second = Interval
# ( starting position - window value, ending position + window value)
# - the third = list of SPNs (empty list at this steps)
# genes : one tuple per chromosome where duplicates are founded
# /!\ 'genes duplicates' =/= occurence of HUGO gene symbol in hugo file
with open(args.genes_list, 'r') as fdHugo:
# each line : num chromo \t start pos \t end pos \t HUGO gene symbol
# /!\ no header ! should be removed before
for line_idx, line in enumerate(fdHugo):
# get data from file :
line_split = line.split('\t')
current_chromo_num = chromo_num_correspondance.get(line_split[0])
if current_chromo_num : # if no unlocalized sequences, unplaced sequences, alternate loci
current_Hgs = line_split[3].strip()
if not current_Hgs :
current_Hgs = line_idx
current_Interval = Interval(int(line_split[1]) - args.window, int(line_split[2]) + args.window) # take the window into account
# if line_split[1] = start pos < window , start pos - window < 0, but it is not a problem
current_data = (current_chromo_num, current_Interval , list() )
if current_Hgs not in genes.keys() :
genes[current_Hgs] = list()
#print ('before', genes[current_Hgs])
#print (current_data)
# handle multi occurence and save new data
chromo_num_list = [genes[current_Hgs][i][0] for i in xrange (len (genes[current_Hgs]))]
if current_chromo_num in chromo_num_list : # there is possibly an overlap
# get the index of the tuple holding info on the same chromo...
duplicate_idx = chromo_num_list.index(current_chromo_num)
# ... and the associated data,
duplicate_data = genes[current_Hgs][duplicate_idx]
# merge old and current data,
to_save = (current_chromo_num, Union (current_Interval, duplicate_data[1]), list() )
# and save the merged data :
genes[current_Hgs][duplicate_idx] = to_save
else : # no overlap are possible,
# thus just add a new tuple holding current data :
genes[current_Hgs].append(current_data)
#print ('after', genes[current_Hgs])
print('.', end = '')
print ('\033[92m' + 'DONE' + '\033[0m')
End = time.time()
print ('Exec time :' + str(End - Start))
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# attach the SNPs to the genes
print ('Attach each SNPs of a gene to each other : ', end = "")
Start = time.time()
for Hgs in genes:
print('.', end = '')
#----------
# List SNPs belonging to a gene :
SNPs_in_Hgs = list()
# Each Hgs can be on several chromo.
# Get the list of chromo num of current Hgs:
list_chromo_num = [dupe[0]-1 for dupe in genes[Hgs] ]
# /!\ -1 because numeration begin to 0 and not to 1
# For each chromo num, we have an union of interval :
for dupe_idx, chromo_num in enumerate(list_chromo_num) :
# Get the range of SNP that are positionated on this chromo :
num_SNP_range = xrange(Chromosome[chromo_num][0], Chromosome[chromo_num][1] + 1)
# For each SNP in this range :
for SNP_idx in num_SNP_range :
# If the SNP belong to gene dupe on this chromo (take the window into account)
if SNPs[SNP_idx][1] in genes[Hgs][dupe_idx][1] :
# Add the gene to the list :
SNPs_in_Hgs.append(SNP_idx)
genes[Hgs][dupe_idx][2].append(SNP_idx)
#----------
# Attach each SNPs of a gene to each other :
for SNP_idx1 in xrange(len(SNPs_in_Hgs)):
for SNP_idx2 in xrange(SNP_idx1 + 1, len(SNPs_in_Hgs)):
if net[SNPs_in_Hgs[SNP_idx2], SNPs_in_Hgs[SNP_idx2]] != 1:
net[SNPs_in_Hgs[SNP_idx1], SNPs_in_Hgs[SNP_idx2]] = 1
else : import pdb; pdb.set_trace()
print ('\033[92m' + 'DONE' + '\033[0m')
End = time.time()
print ('Exec time :' + str(End - Start) )
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# connect the SNPs of gene A to the SNPs of gene B
print ('Connect each SNP of Hgs A to each SNP of Hgs B : ', end ="")
Start = time.time()
with open(args.hugo_ppi, 'r') as fdAcsn:
# each line : <hgsA> <name of relationship> <hgsB>
for line in fdAcsn:
line_split = line.split()
HgsA = line_split[0]
HgsB = line_split[2]
# we can only use Hgs for which we have infos about localisation :
if HgsA in genes and HgsB in genes:
# get SNPs of these Hgs :
SNPs_of_A = [SNP_idx for dupe_infos in genes[HgsA] for SNP_idx in dupe_infos[2] ]
SNPs_of_B = [SNP_idx for dupe_infos in genes[HgsB] for SNP_idx in dupe_infos[2] ]
# if these 2 interacting Hgs have some SNPs :
# genes[Hgs] = (chromo num, Interval, [list, of, SNPs, indices])
if len( SNPs_of_A ) > 0 and len(SNPs_of_B) > 0:
# Connect each SNP of hgsA to each SNP of hgsB :
for SNPA in SNPs_of_A:
for SNPB in SNPs_of_B:
if net[SNPB, SNPA] != 1:
net[ SNPA, SNPB] = 1
#else : import pdb; pdb.set_trace()
fdAcsn.close()
print ('\033[92m' + 'DONE' + '\033[0m')
End = time.time()
print ('Exec time :' + str(End - Start))
#---------------------------------------------------------------------------
import pdb; pdb.set_trace()
#---------------------------------------------------------------------------
# write the network into the output file
print ('Write the network into the output file : ', end ="")
# dimacs format
Start = time.time()
[X, Y] = net.nonzero()
#X : line number with non zero data
#Y : for each X, col number with non zero data
# /!\ dimacs numeration begin to 1 but python to 0 !!
# so move the numeratation :
X += 1
Y += 1
array_xy = zip(X, Y)
# array_xy is a list of tuples, where i-th tuple contains the i-th element from X and Y
with open(args.output, 'w') as fdOutput:
# write problem line :
fdOutput.write('p max %d %d \n' % (len(SNPs), len(array_xy)*2 ) ) # * 2 because edges are written in both direction
# write an Arc Descriptor
# for each coord of non zero value in net :
for (x, y) in array_xy:
if x != y: # if non zero value is not on diag... why ???
# write
fdOutput.write('a %d %d 1\n' % (x, y) )
fdOutput.write('a %d %d 1\n' % (y, x) )
# Remark : need an undirected graph
# -> we give the link in both direction
fdOutput.close()
print ('\033[92m' + 'DONE' + '\033[0m')
End = time.time()
print ('Exec time :' + str(End - Start))
#---------------------------------------------------------------------------
TotalEndTime = time.time()
print ("Total excution time :" + str(TotalEndTime - TotalStartTime))
if __name__ == "__main__":
main()
| 49.637931 | 211 | 0.528378 |
f186bedbc11d3c1cc4a557de35fdda8237e512a0 | 2,399 | py | Python | ai_flow/util/process_utils.py | SteNicholas/ai-flow | 2c70547981f1516f0e37bbe6936a1b7cccd31822 | [
"Apache-2.0"
] | null | null | null | ai_flow/util/process_utils.py | SteNicholas/ai-flow | 2c70547981f1516f0e37bbe6936a1b7cccd31822 | [
"Apache-2.0"
] | null | null | null | ai_flow/util/process_utils.py | SteNicholas/ai-flow | 2c70547981f1516f0e37bbe6936a1b7cccd31822 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import os
import signal
import time
import psutil
from typing import List
logger = logging.getLogger(__name__)
def get_all_children_pids(current_pid=None) -> List:
    """Return the pids of all (recursive) descendants of the given process.

    Returns an empty list when the pid does not exist.
    """
    pids = []
    if psutil.pid_exists(current_pid):
        for child in psutil.Process(current_pid).children():
            pids.append(child.pid)
            pids.extend(get_all_children_pids(child.pid))
    return pids
def check_pid_exist(_pid):
    """Return True if a process with the given pid currently exists.

    Sends signal 0 to the process, which performs error checking without
    actually delivering a signal.

    :param _pid: process id to probe
    :return: True if the process exists, False otherwise
    """
    try:
        os.kill(_pid, 0)
    except ProcessLookupError:
        # No such process.
        return False
    except PermissionError:
        # EPERM: the process exists but belongs to another user.
        # BUGFIX: the original caught bare OSError and wrongly reported
        # such processes as absent.
        return True
    except OSError:
        # Any other failure: conservatively report the pid as gone.
        return False
    else:
        return True
def stop_process(pid, process_name, timeout_sec: int = 60):
    """
    Try hard to kill the process with the given pid. It sends SIGTERM once
    per second while the process is still alive; if the process has not
    exited after ``timeout_sec`` seconds (or SIGTERM itself fails), it falls
    back to SIGKILL, and raises RuntimeError if even SIGKILL fails.
    :param pid: The process pid to stop
    :param process_name: The process name to log
    :param timeout_sec: timeout before sending SIGKILL to kill the process
    """
    try:
        start_time = time.monotonic()
        while check_pid_exist(pid):
            if time.monotonic() - start_time > timeout_sec:
                # NOTE(review): this RuntimeError is raised *inside* the try
                # block, so it is caught by the `except Exception` below and
                # deliberately escalates to the SIGKILL fallback instead of
                # propagating — confirm this is the intended behaviour.
                raise RuntimeError(
                    "{} pid: {} does not exit after {} seconds.".format(process_name, pid, timeout_sec))
            os.kill(pid, signal.SIGTERM)
            time.sleep(1)
    except Exception:
        # SIGTERM loop failed (timeout, permission error, ...): try SIGKILL.
        logger.warning("Failed to stop {} pid: {} with SIGTERM. Try to send SIGKILL".format(process_name, pid))
        try:
            os.kill(pid, signal.SIGKILL)
        except Exception as e:
            raise RuntimeError("Failed to kill {} pid: {} with SIGKILL.".format(process_name, pid)) from e
    logger.info("{} pid: {} stopped".format(process_name, pid))
b75c39ebf4fa897681948d3b121466d779c5da47 | 1,660 | py | Python | source/mysql/setup_common.py | Saanidhyavats/dffml | 5664a75aa7fcd5921b1a3e2da9203d94ed960286 | [
"MIT"
] | 3 | 2021-03-08T18:41:21.000Z | 2021-06-05T20:15:14.000Z | source/mysql/setup_common.py | NikhilBartwal/dffml | 16180144f388924d9e5840c4aa80d08970af5e60 | [
"MIT"
] | 1 | 2021-03-20T07:05:55.000Z | 2021-03-20T07:05:55.000Z | source/mysql/setup_common.py | NikhilBartwal/dffml | 16180144f388924d9e5840c4aa80d08970af5e60 | [
"MIT"
] | 1 | 2021-04-19T23:58:26.000Z | 2021-04-19T23:58:26.000Z | import os
import sys
import ast
from pathlib import Path
from setuptools import find_packages
# Package identity used to build the setup() metadata below.
ORG = "intel"
NAME = "dffml-source-mysql"
DESCRIPTION = "DFFML Source for MySQL Protocol"
AUTHOR_NAME = "Sudharsana K J L"
AUTHOR_EMAIL = "kjlsudharsana@gmail.com"
# Template placeholder logic kept from the dffml package skeleton: with the
# concrete NAME above the condition is always true, so IMPORT_NAME is simply
# NAME with dashes replaced by underscores.
IMPORT_NAME = (
    NAME
    if "replace_package_name".upper() != NAME
    else "replace_import_package_name".upper()
).replace("-", "_")
# Locate the package directory: first relative to the invoked script, then
# (if version.py is not found there) relative to this file.
SELF_PATH = Path(sys.argv[0]).parent.resolve()
if not (SELF_PATH / Path(IMPORT_NAME, "version.py")).is_file():
    SELF_PATH = os.path.dirname(os.path.realpath(__file__))
# Read the version string out of <package>/version.py without importing it
# (the right-hand side of the assignment is parsed with ast.literal_eval).
VERSION = ast.literal_eval(
    Path(SELF_PATH, IMPORT_NAME, "version.py")
    .read_text()
    .split("=")[-1]
    .strip()
)
# Long description for PyPI comes straight from the package README.
README = Path(SELF_PATH, "README.md").read_text()
# Keyword arguments forwarded to setuptools.setup() by the caller.
KWARGS = dict(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=README,
    long_description_content_type="text/markdown",
    author=AUTHOR_NAME,
    author_email=AUTHOR_EMAIL,
    maintainer="John Andersen",
    maintainer_email="johnandersenpdx@gmail.com",
    url=f"https://github.com/{ORG}/dffml/blob/master/source/{NAME}/README.md",
    license="MIT",
    keywords=["dffml"],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
)
| 29.122807 | 78 | 0.666867 |
76e8e7424ba5e355f04dcf745a49f077cf4c2a60 | 9,473 | py | Python | Bio/PDB/Vector.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 3 | 2016-11-21T09:55:56.000Z | 2019-04-09T17:39:43.000Z | Bio/PDB/Vector.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 32 | 2016-11-21T07:38:21.000Z | 2017-08-16T13:00:03.000Z | Bio/PDB/Vector.py | rht/biopython | 3a44496d7bd79446266a4951b7d1f64569e4a96d | [
"BSD-3-Clause"
] | 8 | 2016-11-24T18:57:35.000Z | 2022-01-16T08:15:25.000Z | # Copyright (C) 2004, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Vector class, including rotation-related functions."""
from __future__ import print_function
import numpy
def m2rotaxis(m):
    """Return the (angle, axis) pair that corresponds to rotation matrix m.

    The case where `m` is the identity matrix corresponds to a singularity
    where any rotation axis is valid. In that case, `Vector([1,0,0])`, is
    returned.
    """
    eps = 1e-5
    # Check for singularities a la http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToAngle/
    if abs(m[0, 1] - m[1, 0]) < eps and abs(m[0, 2] - m[2, 0]) < eps and abs(m[1, 2] - m[2, 1]) < eps:
        # m is (numerically) symmetric: the angle is either 0 or pi.
        if abs(m[0, 1] + m[1, 0]) < eps and abs(m[0, 2] + m[2, 0]) < eps and abs(m[1, 2] + m[2, 1]) < eps and abs(m[0, 0] + m[1, 1] + m[2, 2] - 3) < eps:
            angle = 0
        else:
            angle = numpy.pi
    else:
        # Generic case: trace(m) = 1 + 2*cos(angle).
        # Angle always between 0 and pi; the sense of rotation is carried
        # by the axis orientation.
        t = 0.5 * (numpy.trace(m) - 1)
        t = max(-1, t)
        t = min(1, t)
        angle = numpy.arccos(t)
    if angle < 1e-15:
        # Angle is 0: any axis will do.
        return 0.0, Vector(1, 0, 0)
    elif angle < numpy.pi:
        # Angle is smaller than pi: the axis is the (normalized)
        # antisymmetric part of m.
        x = m[2, 1] - m[1, 2]
        y = m[0, 2] - m[2, 0]
        z = m[1, 0] - m[0, 1]
        axis = Vector(x, y, z)
        axis.normalize()
        return angle, axis
    else:
        # Angle is pi - special case! Here m = 2*a*a^T - I, so the squared
        # axis components sit on the diagonal: a_i^2 = (m[i,i] + 1) / 2.
        # Use the largest diagonal element for numerical stability
        # (>= instead of > also avoids a zero divisor on tied diagonals).
        m00 = m[0, 0]
        m11 = m[1, 1]
        m22 = m[2, 2]
        if m00 >= m11 and m00 >= m22:
            # a_x = sqrt((m00 - m11 - m22 + 1) / 4).
            # BUGFIX: the original used sqrt(... + 0.5), which skews the
            # axis whenever it is not aligned with a coordinate axis
            # (e.g. axis (0.8, 0.6, 0) came out as (0.97, 0.23, 0)).
            x = 0.5 * numpy.sqrt(m00 - m11 - m22 + 1)
            y = m[0, 1] / (2 * x)
            z = m[0, 2] / (2 * x)
        elif m11 >= m00 and m11 >= m22:
            y = 0.5 * numpy.sqrt(m11 - m00 - m22 + 1)
            x = m[0, 1] / (2 * y)
            z = m[1, 2] / (2 * y)
        else:
            z = 0.5 * numpy.sqrt(m22 - m00 - m11 + 1)
            x = m[0, 2] / (2 * z)
            y = m[1, 2] / (2 * z)
        axis = Vector(x, y, z)
        axis.normalize()
        return numpy.pi, axis
def vector_to_axis(line, point):
    """Return the vector between a point and the closest point on a line.

    In other words: the perpendicular projection of *point* onto the line
    through the origin with direction *line*, subtracted from *point*.

    @type line: L{Vector}
    @param line: vector defining a line
    @type point: L{Vector}
    @param point: vector defining the point
    """
    axis = line.normalized()
    # length of the projection of point onto the line
    projection_length = point.norm() * numpy.cos(axis.angle(point))
    return point - axis ** projection_length
def rotaxis2m(theta, vector):
    """Calculate a left multiplying rotation matrix.

    The returned matrix rotates theta rad around vector.

    Example:
        >>> m=rotaxis(pi, Vector(1, 0, 0))
        >>> rotated_vector=any_vector.left_multiply(m)

    @type theta: float
    @param theta: the rotation angle
    @type vector: L{Vector}
    @param vector: the rotation axis
    @return: The rotation matrix, a 3x3 Numeric array.
    """
    x, y, z = vector.normalized().get_array()
    c = numpy.cos(theta)
    s = numpy.sin(theta)
    t = 1 - c
    # Rodrigues' rotation formula, written out row by row.
    return numpy.array([
        [t * x * x + c,     t * x * y - s * z, t * x * z + s * y],
        [t * x * y + s * z, t * y * y + c,     t * y * z - s * x],
        [t * x * z - s * y, t * y * z + s * x, t * z * z + c],
    ])
rotaxis = rotaxis2m
def refmat(p, q):
    """Return a (left multiplying) matrix that mirrors p onto q.

    Example:
        >>> mirror=refmat(p, q)
        >>> qq=p.left_multiply(mirror)
        >>> print(q)
        >>> print(qq) # q and qq should be the same

    @type p,q: L{Vector}
    @return: The mirror operation, a 3x3 Numeric array.
    """
    p_unit = p.normalized()
    q_unit = q.normalized()
    if (p_unit - q_unit).norm() < 1e-5:
        # p and q already point the same way: the mirror is the identity.
        return numpy.identity(3)
    # Householder reflection about the plane orthogonal to (p - q).
    mirror_normal = (p_unit - q_unit).normalized()
    b = mirror_normal.get_array()
    b.shape = (3, 1)
    return numpy.identity(3) - 2 * numpy.dot(b, numpy.transpose(b))
def rotmat(p, q):
    """Return a (left multiplying) matrix that rotates p onto q.

    Built as the composition of two mirror operations (a rotation is the
    product of two reflections).

    Example:
        >>> r=rotmat(p, q)
        >>> print(q)
        >>> print(p.left_multiply(r))

    @param p: moving vector
    @type p: L{Vector}
    @param q: fixed vector
    @type q: L{Vector}
    @return: rotation matrix that rotates p onto q
    @rtype: 3x3 Numeric array
    """
    return numpy.dot(refmat(q, -p), refmat(p, -p))
def calc_angle(v1, v2, v3):
    """Calculate the angle between 3 vectors representing 3 connected points.

    The angle is taken at the middle point v2.

    @param v1, v2, v3: the tree points that define the angle
    @type v1, v2, v3: L{Vector}
    @return: angle
    @rtype: float
    """
    left_arm = v1 - v2
    right_arm = v3 - v2
    return left_arm.angle(right_arm)
def calc_dihedral(v1, v2, v3, v4):
    """Calculate the dihedral angle between 4 connected points.

    The angle is in ]-pi, pi].

    @param v1, v2, v3, v4: the four points that define the dihedral angle
    @type v1, v2, v3, v4: L{Vector}
    """
    bond1 = v1 - v2
    bond2 = v3 - v2
    bond3 = v4 - v3
    normal1 = bond1 ** bond2
    normal2 = bond3 ** bond2
    dihedral = normal1.angle(normal2)
    # The sign follows the orientation of normal1 x normal2 w.r.t. bond2.
    reference = normal1 ** normal2
    try:
        if bond2.angle(reference) > 0.001:
            dihedral = -dihedral
    except ZeroDivisionError:
        # normal1 x normal2 is null: dihedral is exactly pi, keep it positive
        pass
    return dihedral
class Vector(object):
    """3D vector with the usual arithmetic, norm and rotation helpers."""

    def __init__(self, x, y=None, z=None):
        """Initialize from three numbers or a single sequence of 3 numbers."""
        if y is None and z is None:
            # Array, list, tuple...
            if len(x) != 3:
                raise ValueError("Vector: x is not a "
                                 "list/tuple/array of 3 numbers")
            self._ar = numpy.array(x, 'd')
        else:
            # Three numbers
            self._ar = numpy.array((x, y, z), 'd')

    def __repr__(self):
        x, y, z = self._ar
        return "<Vector %.2f, %.2f, %.2f>" % (x, y, z)

    def __neg__(self):
        """Return Vector(-x, -y, -z)."""
        a = -self._ar
        return Vector(a)

    def __add__(self, other):
        """Return Vector+other Vector or scalar."""
        if isinstance(other, Vector):
            a = self._ar + other._ar
        else:
            a = self._ar + numpy.array(other)
        return Vector(a)

    def __sub__(self, other):
        """Return Vector-other Vector or scalar."""
        if isinstance(other, Vector):
            a = self._ar - other._ar
        else:
            a = self._ar - numpy.array(other)
        return Vector(a)

    def __mul__(self, other):
        """Return Vector.Vector (dot product)."""
        return sum(self._ar * other._ar)

    def __div__(self, x):
        """Return Vector(coords/a) (scalar or element-wise division)."""
        a = self._ar / numpy.array(x)
        return Vector(a)

    # BUGFIX: under Python 3 the / operator calls __truediv__, so without
    # this alias Vector division only worked under Python 2.
    __truediv__ = __div__

    def __pow__(self, other):
        """Return VectorxVector (cross product) or Vectorxscalar."""
        if isinstance(other, Vector):
            a, b, c = self._ar
            d, e, f = other._ar
            # cross product via 2x2 cofactor determinants
            c1 = numpy.linalg.det(numpy.array(((b, c), (e, f))))
            c2 = -numpy.linalg.det(numpy.array(((a, c), (d, f))))
            c3 = numpy.linalg.det(numpy.array(((a, b), (d, e))))
            return Vector(c1, c2, c3)
        else:
            a = self._ar * numpy.array(other)
            return Vector(a)

    def __getitem__(self, i):
        return self._ar[i]

    def __setitem__(self, i, value):
        self._ar[i] = value

    def __contains__(self, i):
        return (i in self._ar)

    def norm(self):
        """Return vector norm."""
        return numpy.sqrt(sum(self._ar * self._ar))

    def normsq(self):
        """Return square of vector norm."""
        return abs(sum(self._ar * self._ar))

    def normalize(self):
        """Normalize the Vector object.

        Changes the state of `self` and doesn't return a value. If you need
        to chain function calls or create a new object use the `normalized`
        method.
        """
        if self.norm():
            self._ar = self._ar / self.norm()

    def normalized(self):
        """Return a normalized copy of the Vector.

        To avoid allocating new objects use the `normalize` method.
        """
        v = self.copy()
        v.normalize()
        return v

    def angle(self, other):
        """Return angle between two vectors."""
        n1 = self.norm()
        n2 = other.norm()
        c = (self * other) / (n1 * n2)
        # Take care of roundoff errors
        c = min(c, 1)
        c = max(-1, c)
        return numpy.arccos(c)

    def get_array(self):
        """Return (a copy of) the array of coordinates."""
        return numpy.array(self._ar)

    def left_multiply(self, matrix):
        """Return Vector=Matrix x Vector."""
        a = numpy.dot(matrix, self._ar)
        return Vector(a)

    def right_multiply(self, matrix):
        """Return Vector=Vector x Matrix."""
        a = numpy.dot(self._ar, matrix)
        return Vector(a)

    def copy(self):
        """Return a deep copy of the Vector."""
        return Vector(self._ar)
| 27.457971 | 153 | 0.535944 |
124cec97617af3651e6b550dec6409d6dcc5cc86 | 9,288 | py | Python | homeassistant/components/sensor/synologydsm.py | clementTal/home-assistant | 7bc2362e33f86d41769d753d1701a53ae60224d5 | [
"Apache-2.0"
] | 7 | 2018-08-03T10:15:36.000Z | 2019-03-25T13:31:55.000Z | homeassistant/components/sensor/synologydsm.py | clementTal/home-assistant | 7bc2362e33f86d41769d753d1701a53ae60224d5 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/sensor/synologydsm.py | clementTal/home-assistant | 7bc2362e33f86d41769d753d1701a53ae60224d5 | [
"Apache-2.0"
] | 3 | 2018-12-04T11:54:27.000Z | 2019-08-31T14:41:32.000Z | """
Support for Synology NAS Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.synologydsm/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_SSL,
ATTR_ATTRIBUTION, TEMP_CELSIUS, CONF_MONITORED_CONDITIONS,
EVENT_HOMEASSISTANT_START, CONF_DISKS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# PyPI dependency installed on demand by Home Assistant.
REQUIREMENTS = ['python-synology==0.2.0']

_LOGGER = logging.getLogger(__name__)

CONF_ATTRIBUTION = 'Data provided by Synology'
CONF_VOLUMES = 'volumes'
DEFAULT_NAME = 'Synology DSM'
DEFAULT_PORT = 5001

# Throttle interval for polling the NAS API.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)

# Sensor catalogs: condition key -> [display name, unit, mdi icon].
# Utilisation API sensors (CPU / memory / network).
_UTILISATION_MON_COND = {
    'cpu_other_load': ['CPU Load (Other)', '%', 'mdi:chip'],
    'cpu_user_load': ['CPU Load (User)', '%', 'mdi:chip'],
    'cpu_system_load': ['CPU Load (System)', '%', 'mdi:chip'],
    'cpu_total_load': ['CPU Load (Total)', '%', 'mdi:chip'],
    'cpu_1min_load': ['CPU Load (1 min)', '%', 'mdi:chip'],
    'cpu_5min_load': ['CPU Load (5 min)', '%', 'mdi:chip'],
    'cpu_15min_load': ['CPU Load (15 min)', '%', 'mdi:chip'],
    'memory_real_usage': ['Memory Usage (Real)', '%', 'mdi:memory'],
    'memory_size': ['Memory Size', 'Mb', 'mdi:memory'],
    'memory_cached': ['Memory Cached', 'Mb', 'mdi:memory'],
    'memory_available_swap': ['Memory Available (Swap)', 'Mb', 'mdi:memory'],
    'memory_available_real': ['Memory Available (Real)', 'Mb', 'mdi:memory'],
    'memory_total_swap': ['Memory Total (Swap)', 'Mb', 'mdi:memory'],
    'memory_total_real': ['Memory Total (Real)', 'Mb', 'mdi:memory'],
    'network_up': ['Network Up', 'Kbps', 'mdi:upload'],
    'network_down': ['Network Down', 'Kbps', 'mdi:download'],
}
# Storage API sensors reported per volume.
_STORAGE_VOL_MON_COND = {
    'volume_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
    'volume_device_type': ['Type', None, 'mdi:harddisk'],
    'volume_size_total': ['Total Size', None, 'mdi:chart-pie'],
    'volume_size_used': ['Used Space', None, 'mdi:chart-pie'],
    'volume_percentage_used': ['Volume Used', '%', 'mdi:chart-pie'],
    'volume_disk_temp_avg': ['Average Disk Temp', None, 'mdi:thermometer'],
    'volume_disk_temp_max': ['Maximum Disk Temp', None, 'mdi:thermometer'],
}
# Storage API sensors reported per physical disk.
_STORAGE_DSK_MON_COND = {
    'disk_name': ['Name', None, 'mdi:harddisk'],
    'disk_device': ['Device', None, 'mdi:dots-horizontal'],
    'disk_smart_status': ['Status (Smart)', None,
                          'mdi:checkbox-marked-circle-outline'],
    'disk_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
    'disk_exceed_bad_sector_thr': ['Exceeded Max Bad Sectors', None,
                                   'mdi:test-tube'],
    'disk_below_remain_life_thr': ['Below Min Remaining Life', None,
                                   'mdi:test-tube'],
    'disk_temp': ['Temperature', None, 'mdi:thermometer'],
}

# Union of every valid key for the monitored_conditions config option.
_MONITORED_CONDITIONS = list(_UTILISATION_MON_COND.keys()) + \
    list(_STORAGE_VOL_MON_COND.keys()) + \
    list(_STORAGE_DSK_MON_COND.keys())

# Extend the shared sensor-platform schema with this platform's options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_SSL, default=True): cv.boolean,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_MONITORED_CONDITIONS):
        vol.All(cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]),
    vol.Optional(CONF_DISKS): cv.ensure_list,
    vol.Optional(CONF_VOLUMES): cv.ensure_list,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Synology NAS Sensor."""
    def run_setup(event):
        """Create the sensors once Home Assistant has fully started.

        Delaying setup until the start event means all entities can be
        created in one batch after initialization.
        """
        host = config.get(CONF_HOST)
        port = config.get(CONF_PORT)
        username = config.get(CONF_USERNAME)
        password = config.get(CONF_PASSWORD)
        use_ssl = config.get(CONF_SSL)
        unit = hass.config.units.temperature_unit
        monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
        api = SynoApi(host, port, username, password, unit, use_ssl)

        # Utilisation sensors (CPU / memory / network) have no per-device key.
        sensors = [SynoNasUtilSensor(
            api, variable, _UTILISATION_MON_COND[variable])
                   for variable in monitored_conditions
                   if variable in _UTILISATION_MON_COND]

        # Handle all volumes; defaults to every volume the API reports.
        for volume in config.get(CONF_VOLUMES, api.storage.volumes):
            sensors += [SynoNasStorageSensor(
                api, variable, _STORAGE_VOL_MON_COND[variable], volume)
                        for variable in monitored_conditions
                        if variable in _STORAGE_VOL_MON_COND]

        # Handle all disks; defaults to every disk the API reports.
        for disk in config.get(CONF_DISKS, api.storage.disks):
            sensors += [SynoNasStorageSensor(
                api, variable, _STORAGE_DSK_MON_COND[variable], disk)
                        for variable in monitored_conditions
                        if variable in _STORAGE_DSK_MON_COND]

        add_devices(sensors, True)

    # Wait until start event is sent to load this component.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi:
    """Thin, throttled wrapper around the SynologyDSM client library."""

    def __init__(self, host, port, username, password, temp_unit, use_ssl):
        """Initialize the API wrapper class."""
        from SynologyDSM import SynologyDSM
        self.temp_unit = temp_unit

        try:
            self._api = SynologyDSM(host, port, username, password,
                                    use_https=use_ssl)
        except:  # noqa: E722 pylint: disable=bare-except
            _LOGGER.error("Error setting up Synology DSM")
        # NOTE(review): if the constructor raised, self._api was never
        # assigned and the attribute reads below crash with AttributeError —
        # confirm whether setup should abort early instead.

        # Will be updated when update() gets called.
        self.utilisation = self._api.utilisation
        self.storage = self._api.storage

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh cached API data, at most once per MIN_TIME_BETWEEN_UPDATES."""
        self._api.update()
class SynoNasSensor(Entity):
    """Base class for a Synology NAS Sensor entity."""

    def __init__(self, api, variable, variable_info, monitor_device=None):
        """Initialize the sensor.

        variable_info is a [name, unit, icon] triple from the catalogs above;
        monitor_device is the volume/disk identifier, or None for
        utilisation-wide sensors.
        """
        self.var_id = variable
        self.var_name = variable_info[0]
        self.var_units = variable_info[1]
        self.var_icon = variable_info[2]
        self.monitor_device = monitor_device
        self._api = api

    @property
    def name(self):
        """Return the name of the sensor, if any."""
        if self.monitor_device is not None:
            return "{} ({})".format(self.var_name, self.monitor_device)
        return self.var_name

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self.var_icon

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        # Temperature sensors report in the unit configured on the HA
        # instance rather than the catalog's static unit.
        if self.var_id in ['volume_disk_temp_avg', 'volume_disk_temp_max',
                           'disk_temp']:
            return self._api.temp_unit
        return self.var_units

    def update(self):
        """Get the latest data for the states."""
        if self._api is not None:
            self._api.update()

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
        }
class SynoNasUtilSensor(SynoNasSensor):
    """Sensor backed by the Synology Utilisation API."""

    @property
    def state(self):
        """Return the state of the sensor."""
        network_sensors = ['network_up', 'network_down']
        memory_sensors = ['memory_size', 'memory_cached',
                          'memory_available_swap', 'memory_available_real',
                          'memory_total_swap', 'memory_total_real']

        if self.var_id in network_sensors or self.var_id in memory_sensors:
            # These utilisation attributes are callables; False presumably
            # requests the raw (non human-readable) value — TODO confirm
            # against the python-synology API.
            attr = getattr(self._api.utilisation, self.var_id)(False)

            # Scale raw values down by 1024 (network) or 1024^2 (memory)
            # to match the units declared in _UTILISATION_MON_COND.
            if self.var_id in network_sensors:
                return round(attr / 1024.0, 1)
            elif self.var_id in memory_sensors:
                return round(attr / 1024.0 / 1024.0, 1)
        else:
            # Plain attributes (CPU loads, usage percentages) need no scaling.
            return getattr(self._api.utilisation, self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
    """Sensor backed by the Synology Storage API (per volume or disk)."""

    @property
    def state(self):
        """Return the state of the sensor.

        Returns None implicitly when no monitor_device was configured.
        """
        temp_sensors = ['volume_disk_temp_avg', 'volume_disk_temp_max',
                        'disk_temp']

        if self.monitor_device is not None:
            if self.var_id in temp_sensors:
                attr = getattr(
                    self._api.storage, self.var_id)(self.monitor_device)

                # The API reports Celsius; convert when HA is configured
                # for Fahrenheit.
                if self._api.temp_unit == TEMP_CELSIUS:
                    return attr

                return round(attr * 1.8 + 32.0, 1)
            return getattr(self._api.storage, self.var_id)(self.monitor_device)
| 38.222222 | 79 | 0.640289 |
bb233df6e3fcae25f192b44c64ed536e77ad33f0 | 8,146 | py | Python | util.py | Ankuraxz/Detectx-Yolo-V3 | bb3dcd75741131f22fc17337e5e8fe9fe9a3bd6d | [
"MIT"
] | 37 | 2019-08-10T05:00:33.000Z | 2022-03-23T20:13:07.000Z | util.py | Ankuraxz/Detectx-Yolo-V3 | bb3dcd75741131f22fc17337e5e8fe9fe9a3bd6d | [
"MIT"
] | 3 | 2019-11-20T20:23:43.000Z | 2021-04-14T18:48:17.000Z | util.py | Ankuraxz/Detectx-Yolo-V3 | bb3dcd75741131f22fc17337e5e8fe9fe9a3bd6d | [
"MIT"
] | 42 | 2019-08-10T05:00:36.000Z | 2021-10-01T19:59:40.000Z |
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import matplotlib.pyplot as plt
def count_parameters(model):
    """Return the total number of elements across all of *model*'s parameters."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
def count_learnable_parameters(model):
    """Return the number of elements in parameters that require gradients."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def convert2cpu(matrix):
    """Return *matrix* on the CPU.

    CPU tensors are returned unchanged; GPU tensors are copied into a new
    CPU FloatTensor of the same shape.
    """
    if not matrix.is_cuda:
        return matrix
    cpu_copy = torch.FloatTensor(matrix.size())
    cpu_copy.copy_(matrix)
    return cpu_copy
def bbox_iou(box1, box2):
    """
    Return the IoU of two sets of bounding boxes.

    Boxes are rows of (x1, y1, x2, y2) corner coordinates; the "+ 1" terms
    treat coordinates as inclusive pixel indices.
    """
    # Corner coordinates of both box sets.
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # Corners of the intersection rectangle.
    ix1 = torch.max(b1_x1, b2_x1)
    iy1 = torch.max(b1_y1, b2_y1)
    ix2 = torch.min(b1_x2, b2_x2)
    iy2 = torch.min(b1_y2, b2_y2)

    # Clamp negative extents to zero (disjoint boxes); keep the zero tensor
    # on the GPU when CUDA is active, matching the inputs' device.
    zeros = torch.zeros(ix2.shape)
    if torch.cuda.is_available():
        zeros = zeros.cuda()
    inter_area = torch.max(ix2 - ix1 + 1, zeros) * torch.max(iy2 - iy1 + 1, zeros)

    # Union = sum of individual areas minus the intersection.
    area1 = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    area2 = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

    return inter_area / (area1 + area2 - inter_area)
def transformOutput(prediction, inp_dim, anchors, num_classes, CUDA = True):
    """Decode a raw YOLO detection-layer output into absolute box predictions.

    Reshapes the feature map to (batch, grid*grid*num_anchors, 5+num_classes),
    applies sigmoid/offset/anchor transforms per the YOLOv3 paper, and scales
    box coordinates back to input-image pixels.
    """
    batch_size = prediction.size(0)
    # Downsampling factor of this detection layer relative to the input.
    stride = inp_dim // prediction.size(2)
    grid_size = inp_dim // stride
    bbox_attrs = 5 + num_classes
    num_anchors = len(anchors)

    # Express anchors in feature-map cells instead of input pixels.
    anchors = [(a[0]/stride, a[1]/stride) for a in anchors]

    prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
    prediction = prediction.transpose(1,2).contiguous()
    prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)

    # Sigmoid the centre_X, centre_Y, and object confidence.
    prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
    prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
    prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])

    # Add the per-cell center offsets.
    grid_len = np.arange(grid_size)
    a,b = np.meshgrid(grid_len, grid_len)

    x_offset = torch.FloatTensor(a).view(-1,1)
    y_offset = torch.FloatTensor(b).view(-1,1)

    if CUDA:
        x_offset = x_offset.cuda()
        y_offset = y_offset.cuda()

    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)

    prediction[:,:,:2] += x_y_offset

    anchors = torch.FloatTensor(anchors)

    if CUDA:
        anchors = anchors.cuda()

    # Transform the width/height using the exp mapping from the paper.
    anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
    prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors

    # Sigmoid (not softmax) the class scores, per YOLOv3's multi-label design.
    prediction[:,:,5: 5 + num_classes] = torch.sigmoid((prediction[:,:, 5 : 5 + num_classes]))

    # Scale box coordinates from feature-map cells back to input pixels.
    prediction[:,:,:4] *= stride

    return prediction
def load_classes(namesfile):
    """Read one class name per line from *namesfile* and return them as a list.

    Fixes two defects in the original: the file handle was opened and never
    closed (resource leak), and `read().split("\n")[:-1]` silently dropped
    the final class name whenever the file lacked a trailing newline.
    splitlines() handles both trailing-newline conventions correctly.
    """
    with open(namesfile, "r") as fp:
        return fp.read().splitlines()
def get_im_dim(im):
    """Return (width, height) of the image stored at path *im*."""
    image = cv2.imread(im)
    height, width = image.shape[0], image.shape[1]
    return width, height
def unique(tensor):
    """Return the sorted unique values of *tensor*, preserving its tensor type."""
    # Deduplicate on the CPU via numpy (np.unique also sorts)...
    distinct = torch.from_numpy(np.unique(tensor.cpu().numpy()))
    # ...then copy back into a tensor created with tensor.new so the
    # result matches the input's type.
    result = tensor.new(distinct.shape)
    result.copy_(distinct)
    return result
def write_results(prediction, confidence, num_classes, nms = True, nms_conf = 0.4):
    """Filter decoded YOLO predictions by confidence and apply per-class NMS.

    Returns a tensor whose rows are
    (batch_index, x1, y1, x2, y2, objectness, class_score, class_index),
    or the int 0 when no detection survives the confidence threshold.
    NOTE(review): if detections survive thresholding but every per-image
    class loop `continue`s, the uninitialized `output` placeholder is
    returned — confirm callers guard against this.
    """
    # Zero out every row whose objectness is below the threshold.
    conf_mask = (prediction[:,:,4] > confidence).float().unsqueeze(2)
    prediction = prediction*conf_mask

    try:
        ind_nz = torch.nonzero(prediction[:,:,4]).transpose(0,1).contiguous()
    except:
        # No surviving detections at all.
        return 0

    # Convert boxes from (cx, cy, w, h) to corner coordinates (x1, y1, x2, y2).
    box_a = prediction.new(prediction.shape)
    box_a[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)
    box_a[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)
    box_a[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2)
    box_a[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)
    prediction[:,:,:4] = box_a[:,:,:4]

    batch_size = prediction.size(0)

    output = prediction.new(1, prediction.size(2) + 1)
    write = False

    for ind in range(batch_size):
        # select the image from the batch
        image_pred = prediction[ind]

        # Get the class having maximum score, and the index of that class
        # Get rid of num_classes softmax scores
        # Add the class index and the class score of class having maximum score
        max_conf, max_conf_score = torch.max(image_pred[:,5:5+ num_classes], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_score = max_conf_score.float().unsqueeze(1)
        seq = (image_pred[:,:5], max_conf, max_conf_score)
        image_pred = torch.cat(seq, 1)

        # Get rid of the zero entries
        non_zero_ind = (torch.nonzero(image_pred[:,4]))
        image_pred_ = image_pred[non_zero_ind.squeeze(),:].view(-1,7)

        # Get the various classes detected in the image
        try:
            img_classes = unique(image_pred_[:,-1])
        except:
            continue

        # We will do NMS classwise
        for cls in img_classes:
            # get the detections with one particular class
            cls_mask = image_pred_*(image_pred_[:,-1] == cls).float().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:,-2]).squeeze()
            image_pred_class = image_pred_[class_mask_ind].view(-1,7)

            # sort the detections such that the entry with the maximum objectness
            # confidence is at the top
            conf_sort_index = torch.sort(image_pred_class[:,4], descending = True )[1]
            image_pred_class = image_pred_class[conf_sort_index]
            idx = image_pred_class.size(0)

            # if nms has to be done
            if nms:
                # For each detection
                for i in range(idx):
                    # Get the IOUs of all boxes that come after the one we are looking at
                    # in the loop
                    try:
                        ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])
                    except ValueError:
                        break
                    except IndexError:
                        # The tensor shrank below i as suppressed rows were
                        # removed; nothing left to compare against.
                        break

                    # Zero out all the detections that have IoU > treshhold
                    iou_mask = (ious < nms_conf).float().unsqueeze(1)
                    image_pred_class[i+1:] *= iou_mask

                    # Remove the non-zero entries
                    non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()
                    image_pred_class = image_pred_class[non_zero_ind].view(-1,7)

            # Concatenate the batch_id of the image to the detection
            # this helps us identify which image does the detection correspond to
            # We use a linear straucture to hold ALL the detections from the batch
            # the batch_dim is flattened
            # batch is identified by extra batch column
            batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, image_pred_class
            if not write:
                output = torch.cat(seq,1)
                write = True
            else:
                out = torch.cat(seq,1)
                output = torch.cat((output,out))

    return output
| 33.24898 | 195 | 0.599558 |
eb7e63a079b6be743c8430f9f91993305486f444 | 1,019 | py | Python | example/aseqdump_test.py | K4zuki/EtherCAT_Master | b5e2dd2f87ee9a0c520201eb13f6516ae9adf787 | [
"MIT"
] | null | null | null | example/aseqdump_test.py | K4zuki/EtherCAT_Master | b5e2dd2f87ee9a0c520201eb13f6516ae9adf787 | [
"MIT"
] | null | null | null | example/aseqdump_test.py | K4zuki/EtherCAT_Master | b5e2dd2f87ee9a0c520201eb13f6516ae9adf787 | [
"MIT"
] | null | null | null |
import aseqdump
import threading
# Module-level state shared with the MIDI polling threads below.
data = 0
# Last control-change channel seen by Control(); written via `global Ch`.
Ch =0
def Note():
    """Poll ALSA sequencer port 24:0 forever, printing note on/off events."""
    midi = aseqdump.aseqdump("24:0")
    while 1:
        onoff,key,velocity = midi.Note_get()
        # Empty string means no event was pending; keep polling.
        if(onoff == ""):
            continue
        print("Note: %s , %s , %s" % (onoff,key,velocity))
def Control():
    """Poll ALSA sequencer port 24:2 forever, printing control-change events.

    Updates the module-level Ch with the most recent channel.
    """
    global Ch
    midi = aseqdump.aseqdump("24:2")
    while 1:
        Ch,value = midi.Control_get()
        # Empty string means no event was pending; keep polling.
        if(Ch == ""):
            continue
        print("Control: %s , %s" % (Ch,value))
def Pitch():
    """Poll ALSA sequencer port 24:1 forever, printing pitch-bend events."""
    midi = aseqdump.aseqdump("24:1")
    while 1:
        # NOTE(review): unlike Control(), no `global Ch` here, so this Ch is
        # a local that shadows the module-level one — confirm intent.
        Ch,value = midi.Pitch_get()
        if(Ch == ""):
            continue
        print("Pitch: %s , %s" % (Ch,value))
# Start the note-event reader in a background thread.
thread_1 = threading.Thread(target=Note)
thread_1.start()
"""
thread_2 = threading.Thread(target=Control)
thread_2.start()
thread_3 = threading.Thread(target=Pitch)
thread_3.start()
"""
# Block on the reader thread instead of the original `while 1: pass`
# busy-wait: the thread never exits, so this still blocks forever, but
# without pegging a CPU core at 100%.
thread_1.join()
# print("0x%04X" % data)
# midi2 = aseqdump.aseqdump("24:2")
# Ch,value = midi2.Control_get()
# print("Control2: %s , %s " % (Ch,value))
b4226dead7edb8f3846eb2f2dc9275b5a24a8359 | 21,771 | py | Python | scripts/train_detection.py | urasakikeisuke/seamseg | 2b3a6d8aaaa895df4949e263e97c2f8b83332b88 | [
"BSD-3-Clause"
] | 282 | 2019-06-07T11:37:01.000Z | 2022-03-19T05:43:02.000Z | scripts/train_detection.py | urasakikeisuke/seamseg | 2b3a6d8aaaa895df4949e263e97c2f8b83332b88 | [
"BSD-3-Clause"
] | 32 | 2019-07-02T10:39:03.000Z | 2022-03-10T14:10:13.000Z | scripts/train_detection.py | urasakikeisuke/seamseg | 2b3a6d8aaaa895df4949e263e97c2f8b83332b88 | [
"BSD-3-Clause"
] | 56 | 2019-07-24T02:31:37.000Z | 2022-01-07T16:19:50.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import shutil
import time
from collections import OrderedDict
from os import path
import tensorboardX as tensorboard
import torch
import torch.optim as optim
import torch.utils.data as data
from torch import distributed
import seamseg.models as models
import seamseg.utils.coco_ap as coco_ap
from seamseg.algos.detection import PredictionGenerator, ProposalMatcher, DetectionLoss
from seamseg.algos.fpn import DetectionAlgoFPN, RPNAlgoFPN
from seamseg.algos.rpn import AnchorMatcher, ProposalGenerator, RPNLoss
from seamseg.config import load_config, DEFAULTS as DEFAULT_CONFIGS
from seamseg.data import ISSTransform, ISSDataset, iss_collate_fn
from seamseg.data.sampler import DistributedARBatchSampler
from seamseg.models.detection import DetectionNet, NETWORK_INPUTS
from seamseg.modules.fpn import FPN, FPNBody
from seamseg.modules.heads import RPNHead, FPNROIHead
from seamseg.utils import logging
from seamseg.utils.meters import AverageMeter
from seamseg.utils.misc import config_to_string, scheduler_from_config, norm_act_from_config, freeze_params, \
all_reduce_losses, NORM_LAYERS, OTHER_LAYERS
from seamseg.utils.parallel import DistributedDataParallel
from seamseg.utils.snapshot import save_snapshot, resume_from_snapshot, pre_train_from_snapshots
# Command-line interface: distributed-launch rank plus training options.
parser = argparse.ArgumentParser(description="Detection training script")
parser.add_argument("--local_rank", type=int)
parser.add_argument("--log_dir", type=str, default=".", help="Write logs to the given directory")
parser.add_argument("--resume", metavar="FILE", type=str, help="Resume training from given file")
parser.add_argument("--eval", action="store_true", help="Do a single validation run")
parser.add_argument("--pre_train", metavar="FILE", type=str, nargs="*",
                    help="Start from the given pre-trained snapshots, overwriting each with the next one in the list. "
                         "Snapshots can be given in the format '{module_name}:{path}', where '{module_name} is one of "
                         "'body', 'rpn_head' or 'roi_head'. In that case only that part of the network "
                         "will be loaded from the snapshot")
parser.add_argument("config", metavar="FILE", type=str, help="Path to configuration file")
parser.add_argument("data", metavar="DIR", type=str, help="Path to dataset")
def log_debug(msg, *args, **kwargs):
    """Emit a debug log entry, but only from the rank-0 (master) process."""
    if distributed.get_rank() != 0:
        return
    logging.get_logger().debug(msg, *args, **kwargs)
def log_info(msg, *args, **kwargs):
    """Emit an info log entry, but only from the rank-0 (master) process."""
    if distributed.get_rank() != 0:
        return
    logging.get_logger().info(msg, *args, **kwargs)
def make_config(args):
    """Load the run configuration, merging file values over detection defaults."""
    log_debug("Loading configuration from %s", args.config)
    conf = load_config(args.config, DEFAULT_CONFIGS["detection"])
    log_debug("\n%s", config_to_string(conf))
    return conf
def make_dataloader(args, config, rank, world_size):
    """Build the distributed train and validation dataloaders.

    Returns (train_dl, val_dl); only the training transform applies random
    flip/scale augmentation.
    """
    config = config["dataloader"]
    log_debug("Creating dataloaders for dataset in %s", args.data)

    # Training dataloader
    train_tf = ISSTransform(config.getint("shortest_size"),
                            config.getint("longest_max_size"),
                            config.getstruct("rgb_mean"),
                            config.getstruct("rgb_std"),
                            config.getboolean("random_flip"),
                            config.getstruct("random_scale"))
    train_db = ISSDataset(args.data, config["train_set"], train_tf)
    # Aspect-ratio-grouped sampler sharded across the distributed workers;
    # True enables shuffling.
    train_sampler = DistributedARBatchSampler(
        train_db, config.getint("train_batch_size"), world_size, rank, True)
    train_dl = data.DataLoader(train_db,
                               batch_sampler=train_sampler,
                               collate_fn=iss_collate_fn,
                               pin_memory=True,
                               num_workers=config.getint("num_workers"))

    # Validation dataloader (no augmentation, no shuffling)
    val_tf = ISSTransform(config.getint("shortest_size"),
                          config.getint("longest_max_size"),
                          config.getstruct("rgb_mean"),
                          config.getstruct("rgb_std"))
    val_db = ISSDataset(args.data, config["val_set"], val_tf)
    val_sampler = DistributedARBatchSampler(
        val_db, config.getint("val_batch_size"), world_size, rank, False)
    val_dl = data.DataLoader(val_db,
                             batch_sampler=val_sampler,
                             collate_fn=iss_collate_fn,
                             pin_memory=True,
                             num_workers=config.getint("num_workers"))

    return train_dl, val_dl
def make_model(config, num_thing, num_stuff):
    """Assemble the full detection network (backbone + FPN + RPN + ROI head)
    from the configuration sections."""
    body_config = config["body"]
    fpn_config = config["fpn"]
    rpn_config = config["rpn"]
    roi_config = config["roi"]
    classes = {"total": num_thing + num_stuff, "stuff": num_stuff, "thing": num_thing}

    # BN + activation
    norm_act_static, norm_act_dynamic = norm_act_from_config(body_config)

    # Create backbone
    log_debug("Creating backbone model %s", body_config["body"])
    body_fn = models.__dict__["net_" + body_config["body"]]
    body_params = body_config.getstruct("body_params") if body_config.get("body_params") else {}
    body = body_fn(norm_act=norm_act_static, **body_params)
    if body_config.get("weights"):
        body.load_state_dict(torch.load(body_config["weights"], map_location="cpu"))

    # Freeze parameters of the first `num_frozen` backbone modules.
    for n, m in body.named_modules():
        for mod_id in range(1, body_config.getint("num_frozen") + 1):
            if ("mod%d" % mod_id) in n:
                freeze_params(m)

    body_channels = body_config.getstruct("out_channels")

    # Create FPN on top of the selected backbone outputs.
    fpn_inputs = fpn_config.getstruct("inputs")
    fpn = FPN([body_channels[inp] for inp in fpn_inputs],
              fpn_config.getint("out_channels"),
              fpn_config.getint("extra_scales"),
              norm_act_static,
              fpn_config["interpolation"])
    body = FPNBody(body, fpn, fpn_inputs)

    # Create RPN (proposal generation + anchor matching + loss).
    proposal_generator = ProposalGenerator(rpn_config.getfloat("nms_threshold"),
                                           rpn_config.getint("num_pre_nms_train"),
                                           rpn_config.getint("num_post_nms_train"),
                                           rpn_config.getint("num_pre_nms_val"),
                                           rpn_config.getint("num_post_nms_val"),
                                           rpn_config.getint("min_size"))
    anchor_matcher = AnchorMatcher(rpn_config.getint("num_samples"),
                                   rpn_config.getfloat("pos_ratio"),
                                   rpn_config.getfloat("pos_threshold"),
                                   rpn_config.getfloat("neg_threshold"),
                                   rpn_config.getfloat("void_threshold"))
    rpn_loss = RPNLoss(rpn_config.getfloat("sigma"))
    rpn_algo = RPNAlgoFPN(
        proposal_generator, anchor_matcher, rpn_loss,
        rpn_config.getint("anchor_scale"), rpn_config.getstruct("anchor_ratios"),
        fpn_config.getstruct("out_strides"), rpn_config.getint("fpn_min_level"), rpn_config.getint("fpn_levels"))
    rpn_head = RPNHead(
        fpn_config.getint("out_channels"), len(rpn_config.getstruct("anchor_ratios")), 1,
        rpn_config.getint("hidden_channels"), norm_act_dynamic)

    # Create detection network (ROI head + matching + loss).
    prediction_generator = PredictionGenerator(roi_config.getfloat("nms_threshold"),
                                               roi_config.getfloat("score_threshold"),
                                               roi_config.getint("max_predictions"))
    proposal_matcher = ProposalMatcher(classes,
                                       roi_config.getint("num_samples"),
                                       roi_config.getfloat("pos_ratio"),
                                       roi_config.getfloat("pos_threshold"),
                                       roi_config.getfloat("neg_threshold_hi"),
                                       roi_config.getfloat("neg_threshold_lo"),
                                       roi_config.getfloat("void_threshold"))
    roi_loss = DetectionLoss(roi_config.getfloat("sigma"))
    roi_size = roi_config.getstruct("roi_size")
    roi_algo = DetectionAlgoFPN(
        prediction_generator, proposal_matcher, roi_loss, classes, roi_config.getstruct("bbx_reg_weights"),
        roi_config.getint("fpn_canonical_scale"), roi_config.getint("fpn_canonical_level"), roi_size,
        roi_config.getint("fpn_min_level"), roi_config.getint("fpn_levels"))
    roi_head = FPNROIHead(fpn_config.getint("out_channels"), classes, roi_size, norm_act=norm_act_dynamic)

    # Create final network
    return DetectionNet(body, rpn_head, roi_head, rpn_algo, roi_algo, classes)
def make_optimizer(config, model, epoch_length):
    """Create the SGD optimizer and LR scheduler.

    Normalization-layer parameters get a separate group so their learning
    rate / weight decay can be zeroed independently. Returns
    (optimizer, scheduler, batch_update, total_epochs).
    """
    body_config = config["body"]
    opt_config = config["optimizer"]
    sch_config = config["scheduler"]

    # Split the network's parameters into norm-layer vs. other groups.
    norm_parameters = []
    other_parameters = []
    for m in model.modules():
        if any(isinstance(m, layer) for layer in NORM_LAYERS):
            norm_parameters += [p for p in m.parameters() if p.requires_grad]
        elif any(isinstance(m, layer) for layer in OTHER_LAYERS):
            other_parameters += [p for p in m.parameters() if p.requires_grad]
    assert len(norm_parameters) + len(other_parameters) == len([p for p in model.parameters() if p.requires_grad]), \
        "Not all parameters that require grad are accounted for in the optimizer"

    # Set-up optimizer hyper-parameters: frozen BN gets lr 0, and weight
    # decay on norm layers is optional.
    parameters = [
        {
            "params": norm_parameters,
            "lr": opt_config.getfloat("lr") if not body_config.getboolean("bn_frozen") else 0.,
            "weight_decay": opt_config.getfloat("weight_decay") if opt_config.getboolean("weight_decay_norm") else 0.
        },
        {
            "params": other_parameters,
            "lr": opt_config.getfloat("lr"),
            "weight_decay": opt_config.getfloat("weight_decay")
        }
    ]

    optimizer = optim.SGD(
        parameters, momentum=opt_config.getfloat("momentum"), nesterov=opt_config.getboolean("nesterov"))

    scheduler = scheduler_from_config(sch_config, optimizer, epoch_length)

    # The scheduler steps either once per batch or once per epoch.
    assert sch_config["update_mode"] in ("batch", "epoch")
    batch_update = sch_config["update_mode"] == "batch"
    total_epochs = sch_config.getint("epochs")

    return optimizer, scheduler, batch_update, total_epochs
def train(model, optimizer, scheduler, dataloader, meters, **varargs):
    """Run one training epoch; returns the updated global step counter."""
    model.train()
    dataloader.batch_sampler.set_epoch(varargs["epoch"])
    optimizer.zero_grad()
    global_step = varargs["global_step"]
    loss_weights = varargs["loss_weights"]

    data_time_meter = AverageMeter((), meters["loss"].momentum)
    batch_time_meter = AverageMeter((), meters["loss"].momentum)

    data_time = time.time()
    for it, batch in enumerate(dataloader):
        # Upload batch
        batch = {k: batch[k].cuda(device=varargs["device"], non_blocking=True) for k in NETWORK_INPUTS}

        data_time_meter.update(torch.tensor(time.time() - data_time))

        # Update scheduler (per-batch mode only)
        global_step += 1
        if varargs["batch_update"]:
            scheduler.step(global_step)

        batch_time = time.time()

        # Run network: loss only, no predictions during training.
        losses, _ = model(**batch, do_loss=True, do_prediction=False)
        distributed.barrier()
        losses = OrderedDict((k, v.mean()) for k, v in losses.items())
        # Total loss is the weighted sum of the individual loss terms.
        losses["loss"] = sum(w * l for w, l in zip(loss_weights, losses.values()))

        optimizer.zero_grad()
        losses["loss"].backward()
        optimizer.step()

        # Gather stats from all workers
        losses = all_reduce_losses(losses)

        # Update meters
        with torch.no_grad():
            for loss_name, loss_value in losses.items():
                meters[loss_name].update(loss_value.cpu())
        batch_time_meter.update(torch.tensor(time.time() - batch_time))

        # Clean-up
        del batch, losses

        # Log
        if varargs["summary"] is not None and (it + 1) % varargs["log_interval"] == 0:
            logging.iteration(
                varargs["summary"], "train", global_step,
                varargs["epoch"] + 1, varargs["num_epochs"],
                it + 1, len(dataloader),
                OrderedDict([
                    ("lr", scheduler.get_lr()[0]),
                    ("loss", meters["loss"]),
                    ("obj_loss", meters["obj_loss"]),
                    ("bbx_loss", meters["bbx_loss"]),
                    ("roi_cls_loss", meters["roi_cls_loss"]),
                    ("roi_bbx_loss", meters["roi_bbx_loss"]),
                    ("data_time", data_time_meter),
                    ("batch_time", batch_time_meter)
                ])
            )

        data_time = time.time()

    return global_step
def validate(model, dataloader, loss_weights, **varargs):
    """Run a full validation pass and return the detection mAP."""
    model.eval()
    dataloader.batch_sampler.set_epoch(varargs["epoch"])

    num_stuff = dataloader.dataset.num_stuff

    loss_meter = AverageMeter(())
    data_time_meter = AverageMeter(())
    batch_time_meter = AverageMeter(())

    # Accumulators for ap and panoptic computation
    coco_struct = []
    img_list = []

    data_time = time.time()
    for it, batch in enumerate(dataloader):
        with torch.no_grad():
            # Keep per-image metadata before the batch is moved to GPU.
            idxs = batch["idx"]
            batch_sizes = [img.shape[-2:] for img in batch["img"]]
            original_sizes = batch["size"]

            # Upload batch
            batch = {k: batch[k].cuda(device=varargs["device"], non_blocking=True) for k in NETWORK_INPUTS}

            data_time_meter.update(torch.tensor(time.time() - data_time))

            batch_time = time.time()

            # Run network: both loss and predictions during validation.
            losses, pred = model(**batch, do_loss=True, do_prediction=True)
            losses = OrderedDict((k, v.mean()) for k, v in losses.items())
            losses = all_reduce_losses(losses)
            loss = sum(w * l for w, l in zip(loss_weights, losses.values()))

            # Update meters
            loss_meter.update(loss.cpu())
            batch_time_meter.update(torch.tensor(time.time() - batch_time))

            del loss, losses

            # Accumulate COCO AP and panoptic predictions
            for i, (bbx_pred, cls_pred, obj_pred) in enumerate(
                    zip(pred["bbx_pred"], pred["cls_pred"], pred["obj_pred"])):
                # If there are no detections skip this image
                if bbx_pred is None:
                    continue

                # COCO AP: thing classes are offset past the stuff classes.
                coco_struct += coco_ap.process_prediction(
                    bbx_pred, cls_pred + num_stuff, obj_pred, None, batch_sizes[i], idxs[i], original_sizes[i])
                img_list.append(idxs[i])

            del pred, batch

            # Log batch
            if varargs["summary"] is not None and (it + 1) % varargs["log_interval"] == 0:
                logging.iteration(
                    None, "val", varargs["global_step"],
                    varargs["epoch"] + 1, varargs["num_epochs"],
                    it + 1, len(dataloader),
                    OrderedDict([
                        ("loss", loss_meter),
                        ("data_time", data_time_meter),
                        ("batch_time", batch_time_meter)
                    ])
                )

            data_time = time.time()

    # Finalize AP computation across all workers.
    det_map, _ = coco_ap.summarize_mp(coco_struct, varargs["coco_gt"], img_list, varargs["log_dir"], False)

    # Log results
    log_info("Validation done")
    if varargs["summary"] is not None:
        logging.iteration(
            varargs["summary"], "val", varargs["global_step"],
            varargs["epoch"] + 1, varargs["num_epochs"],
            len(dataloader), len(dataloader),
            OrderedDict([
                ("loss", loss_meter.mean.item()),
                ("det_map", det_map),
                ("data_time", data_time_meter.mean.item()),
                ("batch_time", batch_time_meter.mean.item())
            ])
        )

    return det_map
def main(args):
    """Entry point: set up distributed training, then run the train/val loop."""
    # Initialize multi-processing
    distributed.init_process_group(backend='nccl', init_method='env://')
    device_id, device = args.local_rank, torch.device(args.local_rank)
    rank, world_size = distributed.get_rank(), distributed.get_world_size()
    torch.cuda.set_device(device_id)

    # Initialize logging (rank 0 only writes logs / tensorboard summaries)
    if rank == 0:
        logging.init(args.log_dir, "training" if not args.eval else "eval")
        summary = tensorboard.SummaryWriter(args.log_dir)
    else:
        summary = None

    # Load configuration
    config = make_config(args)

    # Create dataloaders
    train_dataloader, val_dataloader = make_dataloader(args, config, rank, world_size)

    # Create model
    model = make_model(config, train_dataloader.dataset.num_thing, train_dataloader.dataset.num_stuff)
    if args.resume:
        assert not args.pre_train, "resume and pre_train are mutually exclusive"
        log_debug("Loading snapshot from %s", args.resume)
        snapshot = resume_from_snapshot(model, args.resume, ["body", "rpn_head", "roi_head"])
    elif args.pre_train:
        assert not args.resume, "resume and pre_train are mutually exclusive"
        log_debug("Loading pre-trained model from %s", args.pre_train)
        pre_train_from_snapshots(model, args.pre_train, ["body", "rpn_head", "roi_head"])
    else:
        assert not args.eval, "--resume is needed in eval mode"
        snapshot = None

    # Init GPU stuff
    torch.backends.cudnn.benchmark = config["general"].getboolean("cudnn_benchmark")
    model = DistributedDataParallel(model.cuda(device), device_ids=[device_id], output_device=device_id,
                                    find_unused_parameters=True)

    # Create optimizer
    optimizer, scheduler, batch_update, total_epochs = make_optimizer(config, model, len(train_dataloader))
    if args.resume:
        optimizer.load_state_dict(snapshot["state_dict"]["optimizer"])

    # Training loop
    momentum = 1. - 1. / len(train_dataloader)
    meters = {
        "loss": AverageMeter((), momentum),
        "obj_loss": AverageMeter((), momentum),
        "bbx_loss": AverageMeter((), momentum),
        "roi_cls_loss": AverageMeter((), momentum),
        "roi_bbx_loss": AverageMeter((), momentum)
    }

    # Restore training progress and meter state when resuming.
    if args.resume:
        starting_epoch = snapshot["training_meta"]["epoch"] + 1
        best_score = snapshot["training_meta"]["best_score"]
        global_step = snapshot["training_meta"]["global_step"]
        for name, meter in meters.items():
            meter.load_state_dict(snapshot["state_dict"][name + "_meter"])
        del snapshot
    else:
        starting_epoch = 0
        best_score = 0
        global_step = 0

    # Optional: evaluation only:
    if args.eval:
        log_info("Validating epoch %d", starting_epoch - 1)
        validate(model, val_dataloader, config["optimizer"].getstruct("loss_weights"),
                 device=device, summary=summary, global_step=global_step,
                 epoch=starting_epoch - 1, num_epochs=total_epochs,
                 log_interval=config["general"].getint("log_interval"),
                 coco_gt=config["dataloader"]["coco_gt"], log_dir=args.log_dir)
        exit(0)

    for epoch in range(starting_epoch, total_epochs):
        log_info("Starting epoch %d", epoch + 1)
        if not batch_update:
            scheduler.step(epoch)

        # Run training epoch
        global_step = train(model, optimizer, scheduler, train_dataloader, meters,
                            batch_update=batch_update, epoch=epoch, summary=summary, device=device,
                            log_interval=config["general"].getint("log_interval"), num_epochs=total_epochs,
                            global_step=global_step, loss_weights=config["optimizer"].getstruct("loss_weights"))

        # Save snapshot (only on rank 0)
        if rank == 0:
            snapshot_file = path.join(args.log_dir, "model_last.pth.tar")
            log_debug("Saving snapshot to %s", snapshot_file)
            meters_out_dict = {k + "_meter": v.state_dict() for k, v in meters.items()}
            save_snapshot(snapshot_file, config, epoch, 0, best_score, global_step,
                          body=model.module.body.state_dict(),
                          rpn_head=model.module.rpn_head.state_dict(),
                          roi_head=model.module.roi_head.state_dict(),
                          optimizer=optimizer.state_dict(),
                          **meters_out_dict)

        if (epoch + 1) % config["general"].getint("val_interval") == 0:
            log_info("Validating epoch %d", epoch + 1)
            score = validate(model, val_dataloader, config["optimizer"].getstruct("loss_weights"),
                             device=device, summary=summary, global_step=global_step,
                             epoch=epoch, num_epochs=total_epochs,
                             log_interval=config["general"].getint("log_interval"),
                             coco_gt=config["dataloader"]["coco_gt"], log_dir=args.log_dir)

            # Update the score on the last saved snapshot
            if rank == 0:
                snapshot = torch.load(snapshot_file, map_location="cpu")
                snapshot["training_meta"]["last_score"] = score
                torch.save(snapshot, snapshot_file)
                del snapshot

            # Keep a copy of the best-scoring snapshot so far.
            if score > best_score:
                best_score = score
                if rank == 0:
                    shutil.copy(snapshot_file, path.join(args.log_dir, "model_best.pth.tar"))


if __name__ == "__main__":
    main(parser.parse_args())
| 43.110891 | 119 | 0.617105 |
281d530c7ed955310f981929cc5ad6876b02d758 | 39,047 | py | Python | openstackclient/tests/unit/identity/v3/test_role.py | alvarosimon/python-openstackclient | 2ab3396f19796935ddcb281b865d37839a4f84f7 | [
"Apache-2.0"
] | 1 | 2018-04-23T20:59:31.000Z | 2018-04-23T20:59:31.000Z | openstackclient/tests/unit/identity/v3/test_role.py | adgeese/python-openstackclient | 06263bd5852aad9cd03a76f50140fbbb2d0751ba | [
"Apache-2.0"
] | null | null | null | openstackclient/tests/unit/identity/v3/test_role.py | adgeese/python-openstackclient | 06263bd5852aad9cd03a76f50140fbbb2d0751ba | [
"Apache-2.0"
] | 1 | 2020-07-21T02:18:23.000Z | 2020-07-21T02:18:23.000Z | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.identity.v3 import role
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
class TestRole(identity_fakes.TestIdentityv3):
    """Shared fixture for the role command tests.

    Exposes freshly reset mocks for each identity resource manager as
    ``self.<name>_mock`` shortcuts (users, groups, domains, projects,
    roles).
    """

    def setUp(self):
        super(TestRole, self).setUp()
        identity_client = self.app.client_manager.identity
        # Build the users_mock/groups_mock/... shortcuts, resetting each
        # manager mock so call records don't leak between tests.
        for manager_name in ('users', 'groups', 'domains', 'projects',
                             'roles'):
            manager = getattr(identity_client, manager_name)
            manager.reset_mock()
            setattr(self, manager_name + '_mock', manager)

    def _is_inheritance_testcase(self):
        # Overridden (to True) by TestRoleInherited so derived suites run
        # again with the --inherited flag exercised.
        return False
class TestRoleInherited(TestRole):
    # Mixin flavor of TestRole: suites that also inherit from this class
    # re-run their scenarios with the --inherited flag applied.
    def _is_inheritance_testcase(self):
        return True
class TestRoleAdd(TestRole):
    """Tests for ``openstack role add`` (role.AddRole)."""

    def setUp(self):
        super(TestRoleAdd, self).setUp()

        # Canned identity resources returned for every name->ID lookup the
        # command performs.
        self.users_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.USER),
            loaded=True,
        )
        self.groups_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.GROUP),
            loaded=True,
        )
        self.domains_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.DOMAIN),
            loaded=True,
        )
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        self.roles_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.ROLE),
            loaded=True,
        )
        self.roles_mock.grant.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.ROLE),
            loaded=True,
        )

        # Get the command object to test
        self.cmd = role.AddRole(self.app, None)

    def _check_role_add(self, arglist, verifylist, role_id, kwargs):
        """Run AddRole and verify the resulting RoleManager.grant() call.

        The ``--inherited`` argument, its verify entry and the
        ``os_inherit_extension_inherited`` kwarg are appended here so the
        same scenarios also cover TestRoleAddInherited.
        """
        inherited = self._is_inheritance_testcase()
        if inherited:
            arglist = arglist + ['--inherited']
        verifylist = verifylist + [('inherited', inherited)]
        kwargs = dict(kwargs, os_inherit_extension_inherited=inherited)

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)

        # RoleManager.grant(role, user=, group=, domain=, project=)
        self.roles_mock.grant.assert_called_with(role_id, **kwargs)
        self.assertIsNone(result)

    def test_role_add_user_domain(self):
        self._check_role_add(
            arglist=[
                '--user', identity_fakes.user_name,
                '--domain', identity_fakes.domain_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', identity_fakes.user_name),
                ('group', None),
                ('domain', identity_fakes.domain_name),
                ('project', None),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'user': identity_fakes.user_id,
                'domain': identity_fakes.domain_id,
            },
        )

    def test_role_add_user_project(self):
        self._check_role_add(
            arglist=[
                '--user', identity_fakes.user_name,
                '--project', identity_fakes.project_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', identity_fakes.user_name),
                ('group', None),
                ('domain', None),
                ('project', identity_fakes.project_name),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'user': identity_fakes.user_id,
                'project': identity_fakes.project_id,
            },
        )

    def test_role_add_group_domain(self):
        self._check_role_add(
            arglist=[
                '--group', identity_fakes.group_name,
                '--domain', identity_fakes.domain_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', None),
                ('group', identity_fakes.group_name),
                ('domain', identity_fakes.domain_name),
                ('project', None),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'group': identity_fakes.group_id,
                'domain': identity_fakes.domain_id,
            },
        )

    def test_role_add_group_project(self):
        self._check_role_add(
            arglist=[
                '--group', identity_fakes.group_name,
                '--project', identity_fakes.project_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', None),
                ('group', identity_fakes.group_name),
                ('domain', None),
                ('project', identity_fakes.project_name),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'group': identity_fakes.group_id,
                'project': identity_fakes.project_id,
            },
        )

    def test_role_add_domain_role_on_user_project(self):
        # Use a domain-scoped role (ROLE_2) resolved via --role-domain
        self.roles_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.ROLE_2),
            loaded=True,
        )
        self._check_role_add(
            arglist=[
                '--user', identity_fakes.user_name,
                '--project', identity_fakes.project_name,
                '--role-domain', identity_fakes.domain_name,
                identity_fakes.ROLE_2['name'],
            ],
            verifylist=[
                ('user', identity_fakes.user_name),
                ('group', None),
                ('domain', None),
                ('project', identity_fakes.project_name),
                ('role', identity_fakes.ROLE_2['name']),
            ],
            role_id=identity_fakes.ROLE_2['id'],
            kwargs={
                'user': identity_fakes.user_id,
                'project': identity_fakes.project_id,
            },
        )

    def test_role_add_with_error(self):
        # Neither --user/--group nor --domain/--project given: the command
        # must refuse to run rather than guess an assignment target.
        arglist = [
            identity_fakes.role_name,
        ]
        verifylist = [
            ('user', None),
            ('group', None),
            ('domain', None),
            ('project', None),
            ('role', identity_fakes.role_name),
            ('inherited', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError,
                          self.cmd.take_action, parsed_args)
class TestRoleAddInherited(TestRoleAdd, TestRoleInherited):
    # Re-runs the whole TestRoleAdd suite with --inherited applied
    # (behavior comes entirely from the two bases).
    pass
class TestRoleCreate(TestRole):
    """Tests for ``openstack role create`` (role.CreateRole)."""

    def setUp(self):
        super(TestRoleCreate, self).setUp()
        self.domains_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.DOMAIN), loaded=True)
        self.roles_mock.create.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE), loaded=True)
        # Command under test
        self.cmd = role.CreateRole(self.app, None)

    def test_role_create_no_options(self):
        cli_args = [identity_fakes.role_name]
        verify = [('name', identity_fakes.role_name)]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        # ShowOne.take_action() returns (column names, data) tuples
        columns, data = self.cmd.take_action(parsed_args)

        # RoleManager.create(name=, domain=)
        self.roles_mock.create.assert_called_with(
            domain=None,
            name=identity_fakes.role_name,
        )
        self.assertEqual(('domain', 'id', 'name'), columns)
        self.assertEqual(
            (None, identity_fakes.role_id, identity_fakes.role_name),
            data)

    def test_role_create_with_domain(self):
        # Domain-scoped role is returned for this scenario
        self.roles_mock.create.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE_2), loaded=True)
        cli_args = [
            '--domain', identity_fakes.domain_name,
            identity_fakes.ROLE_2['name'],
        ]
        verify = [
            ('domain', identity_fakes.domain_name),
            ('name', identity_fakes.ROLE_2['name']),
        ]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        columns, data = self.cmd.take_action(parsed_args)

        # RoleManager.create(name=, domain=): the domain name must have
        # been resolved to its ID
        self.roles_mock.create.assert_called_with(
            domain=identity_fakes.domain_id,
            name=identity_fakes.ROLE_2['name'],
        )
        self.assertEqual(('domain', 'id', 'name'), columns)
        self.assertEqual(
            (identity_fakes.domain_id,
             identity_fakes.ROLE_2['id'],
             identity_fakes.ROLE_2['name']),
            data)
class TestRoleDelete(TestRole):
    """Tests for ``openstack role delete`` (role.DeleteRole)."""

    def setUp(self):
        super(TestRoleDelete, self).setUp()
        self.roles_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE), loaded=True)
        self.roles_mock.delete.return_value = None
        # Command under test
        self.cmd = role.DeleteRole(self.app, None)

    def test_role_delete_no_options(self):
        cli_args = [identity_fakes.role_name]
        verify = [('roles', [identity_fakes.role_name])]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        result = self.cmd.take_action(parsed_args)

        self.roles_mock.delete.assert_called_with(identity_fakes.role_id)
        self.assertIsNone(result)

    def test_role_delete_with_domain(self):
        self.roles_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE_2), loaded=True)
        self.roles_mock.delete.return_value = None
        cli_args = [
            '--domain', identity_fakes.domain_name,
            identity_fakes.ROLE_2['name'],
        ]
        verify = [
            ('roles', [identity_fakes.ROLE_2['name']]),
            ('domain', identity_fakes.domain_name),
        ]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        result = self.cmd.take_action(parsed_args)

        self.roles_mock.delete.assert_called_with(identity_fakes.ROLE_2['id'])
        self.assertIsNone(result)

    @mock.patch.object(utils, 'find_resource')
    def test_delete_multi_roles_with_exception(self, find_mock):
        # First role resolves, second lookup blows up -> partial failure
        find_mock.side_effect = [self.roles_mock.get.return_value,
                                 exceptions.CommandError]
        cli_args = [
            identity_fakes.role_name,
            'unexist_role',
        ]
        verify = [
            ('roles', cli_args),
        ]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        try:
            self.cmd.take_action(parsed_args)
            self.fail('CommandError should be raised.')
        except exceptions.CommandError as e:
            self.assertEqual('1 of 2 roles failed to delete.',
                             str(e))

        # Both names must have been looked up, but only the resolvable
        # role may actually be deleted.
        find_mock.assert_any_call(self.roles_mock,
                                  identity_fakes.role_name,
                                  domain_id=None)
        find_mock.assert_any_call(self.roles_mock,
                                  'unexist_role',
                                  domain_id=None)
        self.assertEqual(2, find_mock.call_count)
        self.roles_mock.delete.assert_called_once_with(identity_fakes.role_id)
class TestRoleList(TestRole):
    """Tests for ``openstack role list`` (role.ListRole)."""

    # Default output when no filter is supplied
    columns = (
        'ID',
        'Name',
    )
    datalist = (
        (
            identity_fakes.role_id,
            identity_fakes.role_name,
        ),
    )

    def setUp(self):
        super(TestRoleList, self).setUp()

        self.roles_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(identity_fakes.ROLE),
                loaded=True,
            ),
        ]
        # Canned resources for the name->object lookups performed while
        # resolving the filter arguments.
        self.domains_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.DOMAIN),
            loaded=True,
        )
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        self.users_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.USER),
            loaded=True,
        )
        self.groups_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.GROUP),
            loaded=True,
        )

        # Get the command object to test
        self.cmd = role.ListRole(self.app, None)

    def _check_role_list(self, arglist, verifylist, list_kwargs,
                         columns, datalist):
        """Run ListRole and verify the RoleManager.list() call and output.

        ``list_kwargs`` are the keyword arguments RoleManager.list() must
        have been called with ({} asserts a bare list() call).
        """
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # Lister.take_action() returns (column names, data iterable)
        actual_columns, data = self.cmd.take_action(parsed_args)

        self.roles_mock.list.assert_called_with(**list_kwargs)
        self.assertEqual(columns, actual_columns)
        self.assertEqual(datalist, tuple(data))

    def test_role_list_no_options(self):
        self._check_role_list([], [], {}, self.columns, self.datalist)

    def test_user_list_inherited(self):
        self._check_role_list(
            arglist=[
                '--user', identity_fakes.user_id,
                '--inherited',
            ],
            verifylist=[
                ('user', identity_fakes.user_id),
                ('inherited', True),
            ],
            list_kwargs={
                'domain': 'default',
                'user': self.users_mock.get(),
                'os_inherit_extension_inherited': True,
            },
            columns=self.columns,
            datalist=self.datalist,
        )

    def test_user_list_user(self):
        self._check_role_list(
            arglist=[
                '--user', identity_fakes.user_id,
            ],
            verifylist=[
                ('user', identity_fakes.user_id),
            ],
            list_kwargs={
                'domain': 'default',
                'user': self.users_mock.get(),
                'os_inherit_extension_inherited': False,
            },
            columns=self.columns,
            datalist=self.datalist,
        )

    def test_role_list_domain_user(self):
        self._check_role_list(
            arglist=[
                '--domain', identity_fakes.domain_name,
                '--user', identity_fakes.user_id,
            ],
            verifylist=[
                ('domain', identity_fakes.domain_name),
                ('user', identity_fakes.user_id),
            ],
            list_kwargs={
                'domain': self.domains_mock.get(),
                'user': self.users_mock.get(),
                'os_inherit_extension_inherited': False,
            },
            columns=('ID', 'Name', 'Domain', 'User'),
            datalist=((
                identity_fakes.role_id,
                identity_fakes.role_name,
                identity_fakes.domain_name,
                identity_fakes.user_name,
            ), ),
        )

    def test_role_list_domain_group(self):
        self._check_role_list(
            arglist=[
                '--domain', identity_fakes.domain_name,
                '--group', identity_fakes.group_id,
            ],
            verifylist=[
                ('domain', identity_fakes.domain_name),
                ('group', identity_fakes.group_id),
            ],
            list_kwargs={
                'domain': self.domains_mock.get(),
                'group': self.groups_mock.get(),
                'os_inherit_extension_inherited': False,
            },
            columns=('ID', 'Name', 'Domain', 'Group'),
            datalist=((
                identity_fakes.role_id,
                identity_fakes.role_name,
                identity_fakes.domain_name,
                identity_fakes.group_name,
            ), ),
        )

    def test_role_list_project_user(self):
        self._check_role_list(
            arglist=[
                '--project', identity_fakes.project_name,
                '--user', identity_fakes.user_id,
            ],
            verifylist=[
                ('project', identity_fakes.project_name),
                ('user', identity_fakes.user_id),
            ],
            list_kwargs={
                'project': self.projects_mock.get(),
                'user': self.users_mock.get(),
                'os_inherit_extension_inherited': False,
            },
            columns=('ID', 'Name', 'Project', 'User'),
            datalist=((
                identity_fakes.role_id,
                identity_fakes.role_name,
                identity_fakes.project_name,
                identity_fakes.user_name,
            ), ),
        )

    def test_role_list_project_group(self):
        self._check_role_list(
            arglist=[
                '--project', identity_fakes.project_name,
                '--group', identity_fakes.group_id,
            ],
            verifylist=[
                ('project', identity_fakes.project_name),
                ('group', identity_fakes.group_id),
            ],
            list_kwargs={
                'project': self.projects_mock.get(),
                'group': self.groups_mock.get(),
                'os_inherit_extension_inherited': False,
            },
            columns=('ID', 'Name', 'Project', 'Group'),
            datalist=((
                identity_fakes.role_id,
                identity_fakes.role_name,
                identity_fakes.project_name,
                identity_fakes.group_name,
            ), ),
        )

    def test_role_list_domain_role(self):
        # Listing a domain's own roles uses the domain_id filter and
        # returns the domain-scoped role
        self.roles_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(identity_fakes.ROLE_2),
                loaded=True,
            ),
        ]
        self._check_role_list(
            arglist=[
                '--domain', identity_fakes.domain_name,
            ],
            verifylist=[
                ('domain', identity_fakes.domain_name),
            ],
            list_kwargs={
                'domain_id': identity_fakes.domain_id,
            },
            columns=('ID', 'Name', 'Domain'),
            datalist=((
                identity_fakes.ROLE_2['id'],
                identity_fakes.ROLE_2['name'],
                identity_fakes.domain_name,
            ), ),
        )

    def test_role_list_group_with_error(self):
        # --group without --domain/--project is ambiguous and must fail
        arglist = [
            '--group', identity_fakes.group_id,
        ]
        verifylist = [
            ('group', identity_fakes.group_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError,
                          self.cmd.take_action, parsed_args)
class TestRoleRemove(TestRole):
    """Tests for ``openstack role remove`` (role.RemoveRole).

    Fix: the group/domain scenario previously carried a duplicated
    ('role', ...) entry in its verify list; the duplicate is removed.
    """

    def setUp(self):
        super(TestRoleRemove, self).setUp()

        # Canned identity resources for every name->ID lookup the command
        # performs.
        self.users_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.USER),
            loaded=True,
        )
        self.groups_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.GROUP),
            loaded=True,
        )
        self.domains_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.DOMAIN),
            loaded=True,
        )
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        self.roles_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.ROLE),
            loaded=True,
        )
        self.roles_mock.revoke.return_value = None

        # Get the command object to test
        self.cmd = role.RemoveRole(self.app, None)

    def _check_role_remove(self, arglist, verifylist, role_id, kwargs):
        """Run RemoveRole and verify the RoleManager.revoke() call.

        ``--inherited`` handling is appended automatically so the same
        scenarios also run for inheritance-enabled subclasses.
        """
        inherited = self._is_inheritance_testcase()
        if inherited:
            arglist = arglist + ['--inherited']
        verifylist = verifylist + [('inherited', inherited)]
        kwargs = dict(kwargs, os_inherit_extension_inherited=inherited)

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)

        # RoleManager.revoke(role, user=, group=, domain=, project=)
        self.roles_mock.revoke.assert_called_with(role_id, **kwargs)
        self.assertIsNone(result)

    def test_role_remove_user_domain(self):
        self._check_role_remove(
            arglist=[
                '--user', identity_fakes.user_name,
                '--domain', identity_fakes.domain_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', identity_fakes.user_name),
                ('group', None),
                ('domain', identity_fakes.domain_name),
                ('project', None),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'user': identity_fakes.user_id,
                'domain': identity_fakes.domain_id,
            },
        )

    def test_role_remove_user_project(self):
        self._check_role_remove(
            arglist=[
                '--user', identity_fakes.user_name,
                '--project', identity_fakes.project_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', identity_fakes.user_name),
                ('group', None),
                ('domain', None),
                ('project', identity_fakes.project_name),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'user': identity_fakes.user_id,
                'project': identity_fakes.project_id,
            },
        )

    def test_role_remove_group_domain(self):
        self._check_role_remove(
            arglist=[
                '--group', identity_fakes.group_name,
                '--domain', identity_fakes.domain_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', None),
                ('group', identity_fakes.group_name),
                ('domain', identity_fakes.domain_name),
                ('project', None),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'group': identity_fakes.group_id,
                'domain': identity_fakes.domain_id,
            },
        )

    def test_role_remove_group_project(self):
        self._check_role_remove(
            arglist=[
                '--group', identity_fakes.group_name,
                '--project', identity_fakes.project_name,
                identity_fakes.role_name,
            ],
            verifylist=[
                ('user', None),
                ('group', identity_fakes.group_name),
                ('domain', None),
                ('project', identity_fakes.project_name),
                ('role', identity_fakes.role_name),
            ],
            role_id=identity_fakes.role_id,
            kwargs={
                'group': identity_fakes.group_id,
                'project': identity_fakes.project_id,
            },
        )

    def test_role_remove_domain_role_on_group_domain(self):
        # Use a domain-scoped role (ROLE_2) for this scenario
        self.roles_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.ROLE_2),
            loaded=True,
        )
        self._check_role_remove(
            arglist=[
                '--group', identity_fakes.group_name,
                '--domain', identity_fakes.domain_name,
                identity_fakes.ROLE_2['name'],
            ],
            verifylist=[
                ('user', None),
                ('group', identity_fakes.group_name),
                ('domain', identity_fakes.domain_name),
                ('project', None),
                ('role', identity_fakes.ROLE_2['name']),
            ],
            role_id=identity_fakes.ROLE_2['id'],
            kwargs={
                'group': identity_fakes.group_id,
                'domain': identity_fakes.domain_id,
            },
        )

    def test_role_remove_with_error(self):
        # No --user/--group or --domain/--project given: the command must
        # refuse to run rather than guess an assignment target.
        arglist = [
            identity_fakes.role_name,
        ]
        verifylist = [
            ('user', None),
            ('group', None),
            ('domain', None),
            ('project', None),
            ('role', identity_fakes.role_name),
            ('inherited', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError,
                          self.cmd.take_action, parsed_args)
class TestRoleSet(TestRole):
    """Tests for ``openstack role set`` (role.SetRole)."""

    def setUp(self):
        super(TestRoleSet, self).setUp()
        self.roles_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE), loaded=True)
        self.roles_mock.update.return_value = None
        # Command under test
        self.cmd = role.SetRole(self.app, None)

    def test_role_set_no_options(self):
        cli_args = [
            '--name', 'over',
            identity_fakes.role_name,
        ]
        verify = [
            ('name', 'over'),
            ('role', identity_fakes.role_name),
        ]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        result = self.cmd.take_action(parsed_args)

        # RoleManager.update(role, name=)
        self.roles_mock.update.assert_called_with(identity_fakes.role_id,
                                                  name='over')
        self.assertIsNone(result)

    def test_role_set_domain_role(self):
        self.roles_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE_2), loaded=True)
        cli_args = [
            '--name', 'over',
            '--domain', identity_fakes.domain_name,
            identity_fakes.ROLE_2['name'],
        ]
        verify = [
            ('name', 'over'),
            ('domain', identity_fakes.domain_name),
            ('role', identity_fakes.ROLE_2['name']),
        ]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        result = self.cmd.take_action(parsed_args)

        # RoleManager.update(role, name=)
        self.roles_mock.update.assert_called_with(identity_fakes.ROLE_2['id'],
                                                  name='over')
        self.assertIsNone(result)
class TestRoleShow(TestRole):
    """Tests for ``openstack role show`` (role.ShowRole)."""

    def setUp(self):
        super(TestRoleShow, self).setUp()
        self.roles_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE), loaded=True)
        # Command under test
        self.cmd = role.ShowRole(self.app, None)

    def test_role_show(self):
        cli_args = [identity_fakes.role_name]
        verify = [('role', identity_fakes.role_name)]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        # ShowOne.take_action() returns (column names, data) tuples
        columns, data = self.cmd.take_action(parsed_args)

        # RoleManager.get(role)
        self.roles_mock.get.assert_called_with(identity_fakes.role_name)
        self.assertEqual(('domain', 'id', 'name'), columns)
        self.assertEqual(
            (None, identity_fakes.role_id, identity_fakes.role_name),
            data)

    def test_role_show_domain_role(self):
        self.roles_mock.get.return_value = fakes.FakeResource(
            None, copy.deepcopy(identity_fakes.ROLE_2), loaded=True)
        cli_args = [
            '--domain', identity_fakes.domain_name,
            identity_fakes.ROLE_2['name'],
        ]
        verify = [
            ('domain', identity_fakes.domain_name),
            ('role', identity_fakes.ROLE_2['name']),
        ]
        parsed_args = self.check_parser(self.cmd, cli_args, verify)

        columns, data = self.cmd.take_action(parsed_args)

        # utils.find_resource() first calls RoleManager.get(name) and only
        # then retries with name+domain_id. We deliberately assert only the
        # lookup by name so this test does not become brittle to the exact
        # call sequencing inside utils.find_resource().
        self.roles_mock.get.assert_called_with(identity_fakes.ROLE_2['name'])
        self.assertEqual(('domain', 'id', 'name'), columns)
        self.assertEqual(
            (identity_fakes.domain_id,
             identity_fakes.ROLE_2['id'],
             identity_fakes.ROLE_2['name']),
            data)
| 33.006762 | 79 | 0.583297 |
f1fd452f2ffba028d32d33316e91a87ae9839ced | 6,672 | py | Python | src/ui/timeline_label_widget.py | KumarLabJax/JABS-behavior-classifier | 8c038a7510ae08d90418403a723e396344bb671c | [
"FSFAP"
] | null | null | null | src/ui/timeline_label_widget.py | KumarLabJax/JABS-behavior-classifier | 8c038a7510ae08d90418403a723e396344bb671c | [
"FSFAP"
] | null | null | null | src/ui/timeline_label_widget.py | KumarLabJax/JABS-behavior-classifier | 8c038a7510ae08d90418403a723e396344bb671c | [
"FSFAP"
] | null | null | null | import math
import numpy as np
from PySide2.QtCore import QSize, Qt
from PySide2.QtGui import QPainter, QColor, QPen, QPixmap, QBrush
from PySide2.QtWidgets import QWidget, QSizePolicy
from src.project.track_labels import TrackLabels
from .colors import (BEHAVIOR_COLOR, NOT_BEHAVIOR_COLOR, BACKGROUND_COLOR,
POSITION_MARKER_COLOR)
class TimelineLabelWidget(QWidget):
"""
Widget that shows a "zoomed out" overview of labels for the entire video.
Because each pixel along the width ends up representing multiple frames,
you can't see fine detail, but you can see where manual labels have been
applied. This can be useful for seeking through the video to a location of
labeling.
"""
_BEHAVIOR_COLOR = QColor(*BEHAVIOR_COLOR)
_NOT_BEHAVIOR_COLOR = QColor(*NOT_BEHAVIOR_COLOR)
_MIX_COLOR = QColor(144, 102, 132)
_BACKGROUND_COLOR = QColor(*BACKGROUND_COLOR)
_RANGE_COLOR = QColor(*POSITION_MARKER_COLOR)
    def __init__(self, *args, **kwargs):
        """Initialize geometry constants and rendering state."""
        super().__init__(*args, **kwargs)
        # fixed geometry of the label bar, in pixels
        self._bar_height = 8
        self._bar_padding = 3
        self._height = self._bar_height + 2 * self._bar_padding
        # half-width (in frames) of the highlighted view box drawn in
        # paintEvent; presumably mirrors the ManualLabelWidget window —
        # TODO confirm against that widget
        self._window_size = 100
        self._frames_in_view = 2 * self._window_size + 1
        # allow widget to expand horizontally but maintain fixed vertical size
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        # TrackLabels object containing labels for current behavior & identity
        self._labels = None
        # In order to indicate where the current frame is on the bar,
        # we need to know which element it corresponds to in the downsampled
        # array. That maps to a pixel location in the bar. To calculate that
        # we will need to know the bin size. This is updated at every resize
        # event
        self._bin_size = 0
        # cached rendering of the downsampled label bar (built in _update_bar)
        self._pixmap = None
        # horizontal padding so pixmap width evenly divides the frame bins
        self._pixmap_offset = 0
        self._current_frame = 0
        self._num_frames = 0
def sizeHint(self):
"""
Override QWidget.sizeHint to give an initial starting size.
Width hint is not so important because we allow the widget to resize
horizontally to fill the available container. The height is fixed,
so the value used here sets the height of the widget.
"""
return QSize(400, self._height)
def resizeEvent(self, event):
"""
handle resize event. Recalculates scaling factors and calls
update_bar() to downsample current label array and rerender the bar
"""
# if no video is loaded, there is nothing to display and nothing to
# resize
if self._num_frames == 0:
return
self._update_scale()
self._update_bar()
    def paintEvent(self, event):
        """ override QWidget paintEvent """
        # make sure we have something to draw
        # NOTE(review): this also appears to protect the division below —
        # assumes _bin_size is nonzero whenever _pixmap exists; confirm
        if self._pixmap is None:
            return
        # get the current position
        # (index of the downsampled bin holding the current frame)
        mapped_position = self._current_frame // self._bin_size
        qp = QPainter(self)
        # draw a box around what is currently being displayed in the
        # ManualLabelWidget
        start = mapped_position - (self._window_size // self._bin_size) + self._pixmap_offset
        qp.setPen(QPen(self._RANGE_COLOR, 1, Qt.SolidLine))
        qp.setBrush(QBrush(self._RANGE_COLOR, Qt.Dense4Pattern))
        qp.drawRect(start, 0, self._frames_in_view // self._bin_size,
                    self.size().height() - 1)
        # then draw the pre-rendered label bar, shifted by the padding offset
        qp.drawPixmap(0 + self._pixmap_offset, 0, self._pixmap)
def set_labels(self, labels):
""" load label track to display """
self._labels = labels
self.update_labels()
def set_current_frame(self, current_frame):
""" called to reposition the view """
self._current_frame = current_frame
self.update()
def set_num_frames(self, num_frames):
""" set the number of frames in the current video """
self._num_frames = num_frames
self._update_scale()
def update_labels(self):
self._update_bar()
self.update()
    def _update_bar(self):
        """Re-render the bar pixmap for the current widget size.

        Downsamples the label array so each pixel column of the pixmap
        represents one bin of frames, then paints one colored column per
        bin according to its (possibly mixed) label value.
        """
        width = self.size().width()
        height = self.size().height()
        # create a pixmap with a width that evenly divides the total number of
        # frames so that each pixel along the width represents a bin of frames
        # (_update_scale() has done this, we can use pixmap_offset to figure
        # out how many pixels of padding will be on each side of the final
        # pixmap)
        pixmap_width = width - 2 * self._pixmap_offset
        self._pixmap = QPixmap(pixmap_width, height)
        self._pixmap.fill(Qt.transparent)
        if self._labels is not None:
            downsampled = self._labels.downsample(self._labels.get_labels(),
                                                  pixmap_width)
        else:
            # if we don't have labels loaded yet, create a dummy array of
            # unlabeled frames to display
            downsampled = TrackLabels.downsample(
                np.full(self._num_frames, TrackLabels.Label.NONE), pixmap_width)
        # draw the bar, each pixel along the width corresponds to a value in the
        # down sampled label array
        qp = QPainter(self._pixmap)
        for x in range(pixmap_width):
            if downsampled[x] == TrackLabels.Label.NONE.value:
                qp.setPen(self._BACKGROUND_COLOR)
            elif downsampled[x] == TrackLabels.Label.BEHAVIOR.value:
                qp.setPen(self._BEHAVIOR_COLOR)
            elif downsampled[x] == TrackLabels.Label.NOT_BEHAVIOR.value:
                qp.setPen(self._NOT_BEHAVIOR_COLOR)
            elif downsampled[x] == TrackLabels.Label.MIX.value:
                # bin contains mix of behavior/not behavior labels
                qp.setPen(self._MIX_COLOR)
            else:
                # unknown value: leave this column transparent
                continue
            # draw a vertical bar of pixels for this bin, inside the padding
            for y in range(self._bar_padding,
                           self._bar_padding + self._bar_height):
                qp.drawPoint(x, y)
        qp.end()
    def _update_scale(self):
        """Update the frames-per-pixel bin size and the pixmap offset.

        bin_size works out to ceil(num_frames / width); padding is the
        number of pixel columns left unused so the remaining columns map
        to whole bins, and half of it is used on each side as the offset.
        """
        width = self.size().width()
        # pad the frame count up to the next multiple of the widget width
        pad_size = math.ceil(
            float(self._num_frames) / width) * width - self._num_frames
        self._bin_size = int(self._num_frames + pad_size) // width
        # leftover pixel columns after mapping whole bins to pixels
        padding = (self._bin_size * width - self._num_frames) // self._bin_size
        self._pixmap_offset = padding // 2
| 36.861878 | 93 | 0.635342 |
f0cd20efe717895b81c18944fabbba9d4cc3d95d | 794 | py | Python | advent_of_code/common_dependencies/intcode_test.py | rfrazier716/advent_of_code_2019 | 90a9ea4828e54ea542140536e98a5a8737a31e1e | [
"MIT"
] | null | null | null | advent_of_code/common_dependencies/intcode_test.py | rfrazier716/advent_of_code_2019 | 90a9ea4828e54ea542140536e98a5a8737a31e1e | [
"MIT"
] | null | null | null | advent_of_code/common_dependencies/intcode_test.py | rfrazier716/advent_of_code_2019 | 90a9ea4828e54ea542140536e98a5a8737a31e1e | [
"MIT"
] | null | null | null | import intcode
import numpy as np
from pathlib import Path
import re
import timeit
def load_commented_program(file_path):
    """Load an Intcode program from a file, stripping '#' comments.

    Everything after a '#' on each line is discarded; the remaining text
    is scanned for integers.  The regex accepts an optional leading '-'
    because real Intcode programs contain negative values (the original
    pattern silently dropped the sign).

    Returns a numpy array of ints (the program's opcodes and operands).
    """
    with open(file_path) as file:
        # drop comments line-by-line, then extract every signed integer
        int_code = re.findall(r"-?[0-9]+",
                              ",".join(line.split("#")[0] for line in file.readlines()))
    return np.array([int(num) for num in int_code])
def main():
    """Benchmark entry point: execute the speed-test Intcode program.

    Loads ``speed_test.txt``, feeds ``n_runs`` to the computer as input,
    runs the program to completion and prints the final memory image.
    """
    n_runs=100
    computer = intcode.IntcodeComputer(verbose=True)
    input_path=Path("speed_test.txt")
    int_code=load_commented_program(input_path)
    computer.load_memory(int_code)
    computer.input(n_runs) #input the number of runs to execute
    computer.run()
    print(computer.memory)
    print("program executed")
    #print(computer.program_finished)
# run the benchmark only when executed directly, not when imported
if __name__=="__main__":
    main()
b1b1bef236e681a735459d134849b44eadc9ec21 | 228 | py | Python | configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
] | null | null | null | configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py | mrzhuzhe/mmdetection | c04ca2c2a65500bc248a5d2ab6ace5b15f00064d | [
"Apache-2.0"
# mmdetection config: Cascade Mask R-CNN (R-101 FPN) with Seesaw loss,
# using a NormedConv2d predictor in the mask head.
_base_ = './cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501
model = dict(
    roi_head=dict(
        mask_head=dict(
            # NOTE(review): "tempearture" is the (misspelled) keyword used by
            # mmdetection's NormedConv2d itself -- do not correct the spelling
            # here or the config will stop matching the upstream API.
            predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
| 38 | 95 | 0.701754 |
7807788df3770e555064b9a84a85f44f739d7be2 | 811 | py | Python | py/server/tests/test_experiments.py | mattrunyon/deephaven-core | 80e3567e4647ab76a81e483d0a8ab542f9aadace | [
"MIT"
] | null | null | null | py/server/tests/test_experiments.py | mattrunyon/deephaven-core | 80e3567e4647ab76a81e483d0a8ab542f9aadace | [
"MIT"
] | null | null | null | py/server/tests/test_experiments.py | mattrunyon/deephaven-core | 80e3567e4647ab76a81e483d0a8ab542f9aadace | [
"MIT"
] | null | null | null | #
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
import unittest
from deephaven import time_table
from deephaven._ugp import ugp_exclusive_lock
from deephaven.experimental import time_window
from tests.testbase import BaseTestCase
class ExperimentalTestCase(BaseTestCase):
    """Tests for the deephaven.experimental package."""

    def test_time_window(self):
        """time_window() appends a boolean column flagging rows in-window."""
        # the ticking source table is created under the exclusive UGP lock
        with ugp_exclusive_lock():
            source_table = time_table("00:00:00.01").update(["TS=currentTime()"])
            t = time_window(source_table, ts_col="TS", window=10**8, bool_col="InWindow")
        # the new boolean column is appended last
        self.assertEqual("InWindow", t.columns[-1].name)
        # after enough rows tick in, both in-window and out-of-window
        # rows should be present in the rendered table
        self.wait_ticking_table_update(t, row_count=20, timeout=60)
        self.assertIn("true", t.to_string(1000))
        self.assertIn("false", t.to_string(1000))
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| 30.037037 | 89 | 0.711467 |
c928d296e91b444330a7efb9a81b69b536b6a2cf | 923 | py | Python | src/graphql/pyutils/path.py | closeio/graphql-core | 28772a2f3529ac2772e616e24ccb76c085f1e87b | [
"MIT"
] | 590 | 2015-10-06T18:22:49.000Z | 2022-03-22T16:32:17.000Z | src/graphql/pyutils/path.py | closeio/graphql-core | 28772a2f3529ac2772e616e24ccb76c085f1e87b | [
"MIT"
] | 300 | 2015-10-06T18:58:11.000Z | 2022-03-22T14:01:44.000Z | src/graphql/pyutils/path.py | closeio/graphql-core | 28772a2f3529ac2772e616e24ccb76c085f1e87b | [
"MIT"
] | 270 | 2015-10-08T19:47:38.000Z | 2022-03-10T04:17:51.000Z | from typing import Any, List, NamedTuple, Optional, Union
__all__ = ["Path"]
class Path(NamedTuple):
    """A generic path of string or integer indices"""

    prev: Any  # Optional['Path'] (python/mypy/issues/731)
    """path with the previous indices"""

    key: Union[str, int]
    """current index in the path (string or integer)"""

    typename: Optional[str]
    """name of the parent type to avoid path ambiguity"""

    def add_key(self, key: Union[str, int], typename: Optional[str] = None) -> "Path":
        """Return a new Path containing the given key."""
        return Path(self, key, typename)

    def as_list(self) -> List[Union[str, int]]:
        """Return a list of the path keys."""
        keys: List[Union[str, int]] = []
        node: Any = self
        # walk the prev-links back to the root, collecting keys
        while node:
            keys.append(node.key)
            node = node.prev
        keys.reverse()
        return keys
| 31.827586 | 86 | 0.605634 |
d98cf6f7bc8059b4391db1a0d609ff94ffd161de | 2,166 | py | Python | conanfile.py | bincrafters/conan-premake_installer | dbc6ac321df648818b5bb139ae345bff01cff217 | [
"MIT"
] | null | null | null | conanfile.py | bincrafters/conan-premake_installer | dbc6ac321df648818b5bb139ae345bff01cff217 | [
"MIT"
] | null | null | null | conanfile.py | bincrafters/conan-premake_installer | dbc6ac321df648818b5bb139ae345bff01cff217 | [
"MIT"
] | null | null | null | from conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild
import os
class PremakeInstallerConan(ConanFile):
    """Conan recipe that builds and packages the premake5 executable."""

    name = "premake_installer"
    version = "5.0.0-alpha14"
    topics = ("conan", "premake", "build", "build-systems")
    description = "Describe your software project just once, using Premake's simple and easy to read syntax, and build it everywhere"
    url = "https://github.com/bincrafters/conan-premake_installer"
    homepage = "https://premake.github.io/"
    license = "BSD-3-Clause"
    # build-platform settings only: this is a tool package, not a library
    settings = "os_build", "arch_build", "compiler"
    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"

    def source(self):
        """Fetch the premake source archive and normalise its directory name."""
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "premake" + "-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

    @property
    def _platform(self):
        # per-OS subdirectory of premake's bundled "build" folder
        return {'Windows': 'vs2017',
                'Linux': 'gmake.unix',
                'Macos': 'gmake.macosx'}.get(str(self.settings.os_build))

    def build(self):
        """Build premake5 in release mode with the platform's toolchain."""
        with tools.chdir(os.path.join(self._source_subfolder, 'build', self._platform)):
            if self.settings.os_build == 'Windows':
                msbuild = MSBuild(self)
                msbuild.build("Premake5.sln", platforms={'x86': 'Win32', 'x86_64': 'x64'}, build_type="Release", arch=self.settings.arch_build)
            elif self.settings.os_build == 'Linux':
                env_build = AutoToolsBuildEnvironment(self)
                env_build.make(args=['config=release'])
            elif self.settings.os_build == 'Macos':
                env_build = AutoToolsBuildEnvironment(self)
                env_build.make(args=['config=release'])

    def package(self):
        """Copy the built binary (platform-dependent name) into bin/."""
        self.copy(pattern="*premake5.exe", dst="bin", keep_path=False)
        self.copy(pattern="*premake5", dst="bin", keep_path=False)

    def package_info(self):
        """Expose the packaged bin/ directory on consumers' PATH."""
        bindir = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH environment variable: {}".format(bindir))
        self.env_info.PATH.append(bindir)

    def package_id(self):
        # the produced binary does not depend on the compiler setting
        del self.info.settings.compiler
| 42.470588 | 143 | 0.644968 |
9c091a5e3e82528ed81a1298bc5bc6273bcf20f7 | 963 | py | Python | minimal_example.py | shehan/flask-api-demo | 129399d15d576ba85cd985eff098c9e663121b51 | [
"MIT"
] | null | null | null | minimal_example.py | shehan/flask-api-demo | 129399d15d576ba85cd985eff098c9e663121b51 | [
"MIT"
] | null | null | null | minimal_example.py | shehan/flask-api-demo | 129399d15d576ba85cd985eff098c9e663121b51 | [
"MIT"
] | null | null | null | from flask import Flask
# module-level Flask application object; __name__ lets Flask locate
# resources (templates, static files) relative to this module
app = Flask(__name__)
# A route binds a URL to a "view function": visiting the URL runs the
# function, and its return value becomes the HTTP response body.
@app.route("/")
# Several routes (e.g. "/home", "/index") could be attached to the same
# function by stacking additional @app.route decorators here.
def home():
    """Landing page with links to the other demo pages."""
    parts = (
        "<h1>Hello World!</h1>",
        "<p>This is going to be fun!</p>",
        "<p>Check out the <a href='/about-us'>About Us</a> page.</p>",
        "<p>Here's the <a href='/contact-us'>Contact Us</a> page.</p>",
    )
    return "".join(parts)
@app.route("/about-us")
def about():
return "<h1>About Us</h1>" \
"<p>Stuff about us goes here...</p><p><a href='/'>Home Page</a></p>"
@app.route("/contact-us")
def contact():
return "<h1>Contact Us</h1>" \
"<p>Get in touch with us...</p><p><a href='/'>Home Page</a></p>"
if __name__ == "__main__":
app.run(debug=True) # setting debug=True will auto-reload the server when a file is changed
| 29.181818 | 96 | 0.593977 |
0989d8732dc92c53dbcfaea178165ddfd6b862b9 | 416 | py | Python | Python/Maths/somme_chiffres.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | 3 | 2021-10-01T06:11:28.000Z | 2021-10-04T20:50:07.000Z | Python/Maths/somme_chiffres.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
] | null | null | null | Python/Maths/somme_chiffres.py | GeneralNZR/maths-and-javascript | 8a0e638e59808b1d987269dddac0b99c96c78c4a | [
"MIT"
def somme_chiffres(n: int) -> int:
    """Return the sum of the decimal digits of *n*.

    Non-positive inputs yield 0, since the loop never runs for n <= 0.

    Example:
        >>> somme_chiffres(1010101)
        4
    """
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
20fa40ac90b9a7acec001589968305674ed42ac6 | 172 | py | Python | aiotractive/trackable_object.py | Danielhiversen/aiotractive | 20fe0be6bc63b44e3fedbc25c0e2533ca39132bf | [
"MIT"
] | null | null | null | aiotractive/trackable_object.py | Danielhiversen/aiotractive | 20fe0be6bc63b44e3fedbc25c0e2533ca39132bf | [
"MIT"
] | null | null | null | aiotractive/trackable_object.py | Danielhiversen/aiotractive | 20fe0be6bc63b44e3fedbc25c0e2533ca39132bf | [
"MIT"
] | null | null | null | from .data_object import DataObject
class TrackableObject(DataObject):
    """A Tractive trackable object exposed through the REST API."""

    async def details(self):
        """Fetch this object's detail record from the API.

        Returns whatever ``self._api.request`` yields for the
        ``trackable_object/{id}`` endpoint; ``self.id`` and ``self._api``
        come from the DataObject base class.
        """
        return await self._api.request(f"trackable_object/{self.id}")
| 24.571429 | 69 | 0.75 |
68713077c5ac2583cae4b3ba087dceec028bf5f9 | 954 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/detection/models/ProbeTaskEnableSpec.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/detection/models/ProbeTaskEnableSpec.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/detection/models/ProbeTaskEnableSpec.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ProbeTaskEnableSpec(object):
    """Request body for enabling or disabling probe (dial-test) tasks."""

    def __init__(self, taskId, enabled=None):
        """
        :param taskId: list of task_ids of the probe tasks to enable or
            disable; list length in [1, 100)
        :param enabled: (Optional) default: disabled; True enables the
            tasks, False disables them
        """
        self.taskId = taskId
        self.enabled = enabled
| 31.8 | 75 | 0.714885 |
986eb0accbbf731623c27cae67b164982e1395cf | 2,023 | py | Python | tests/test_model_template.py | scalarstop/scalarstop | 165ec9cfe70a0cb381cccb4efd5dd98aba74d8ca | [
"MIT"
] | 2 | 2021-04-15T00:17:15.000Z | 2021-05-10T23:11:56.000Z | tests/test_model_template.py | scalarstop/scalarstop | 165ec9cfe70a0cb381cccb4efd5dd98aba74d8ca | [
"MIT"
] | 2 | 2021-07-22T11:05:19.000Z | 2021-10-08T22:20:29.000Z | tests/test_model_template.py | scalarstop/scalarstop | 165ec9cfe70a0cb381cccb4efd5dd98aba74d8ca | [
"MIT"
] | null | null | null | """Unit tests for scalarstop.model_template."""
import doctest
import unittest
import tensorflow as tf
import scalarstop as sp
def load_tests(loader, tests, ignore):  # pylint: disable=unused-argument
    """Have the unittest loader also run doctests.

    Standard unittest ``load_tests`` protocol hook: appends the doctests
    found in ``scalarstop.model_template`` to the discovered suite.
    """
    tests.addTests(doctest.DocTestSuite(sp.model_template))
    return tests
class MyModelTemplate(sp.ModelTemplate):
    """Our example model template for testing."""

    @sp.dataclass
    class Hyperparams(sp.HyperparamsType):
        """Hyperparams for MyModelTemplate."""

        a: int
        b: str = "hi"

    def set_model(self):
        """Build and compile a one-layer Keras model sized by ``a``."""
        model = tf.keras.Sequential(
            layers=[tf.keras.layers.Dense(units=self.hyperparams.a)]
        )
        model.compile()
        return model
class MyModelTemplateForgotHyperparams(sp.ModelTemplate):
    """See what happens when we don't define hyperparams."""

    # deliberately clobber the Hyperparams class so that constructing
    # this template exercises the missing-hyperparams error path
    Hyperparams = None  # type: ignore
class TestModelTemplate(unittest.TestCase):
    """Tests for :py:class:`scalarstop.ModelTemplate`."""

    def test_name(self):
        """Test that names work.

        Both instances should hash identically: the second just spells
        out the default value of ``b`` explicitly.
        """
        model_template_1 = MyModelTemplate(
            hyperparams=dict(a=1),
        )
        model_template_2 = MyModelTemplate(hyperparams=dict(a=1, b="hi"))
        for i, model_template in enumerate((model_template_1, model_template_2)):
            with self.subTest(f"model_template_{i}"):
                # name embeds a deterministic hash of the hyperparams
                self.assertEqual(
                    model_template.name, "MyModelTemplate-naro6iqyw9whazvkgp4w3qa2"
                )
                self.assertEqual(model_template.group_name, "MyModelTemplate")
                self.assertEqual(
                    sp.dataclasses.asdict(model_template.hyperparams), dict(a=1, b="hi")
                )

    def test_missing_hyperparams_class(self):
        """Test what happens when the hyperparams class itself is missing."""
        with self.assertRaises(sp.exceptions.YouForgotTheHyperparams):
            MyModelTemplateForgotHyperparams()
| 31.609375 | 88 | 0.651508 |
0c21563beba210dac4e6025c4aff6b3ed06e0187 | 36,527 | py | Python | Blender/scripts/MOD_Opti_PSK_Dump/MOD07_Opti_PSK_Dump_armature_fail_only aniamtoin_work.py | ECToo/unrealtacticalmod | f78c77587d8a706697703be064fb854ffc31ea4c | [
"Unlicense"
] | null | null | null | Blender/scripts/MOD_Opti_PSK_Dump/MOD07_Opti_PSK_Dump_armature_fail_only aniamtoin_work.py | ECToo/unrealtacticalmod | f78c77587d8a706697703be064fb854ffc31ea4c | [
"Unlicense"
] | null | null | null | Blender/scripts/MOD_Opti_PSK_Dump/MOD07_Opti_PSK_Dump_armature_fail_only aniamtoin_work.py | ECToo/unrealtacticalmod | f78c77587d8a706697703be064fb854ffc31ea4c | [
"Unlicense"
] | null | null | null | #!BPY
"""
Name: 'Unreal Skeletal Mesh/Animation (.psk and .psa) MOD09'
Blender: 240
Group: 'Export'
Tooltip: 'Unreal Skeletal Mesh and Animation Export (*.psk, *.psa)'
"""
__author__ = "Optimus_P-Fat/Active_Trash"
__version__ = "0.0.2"
__bpydoc__ = """\
-- Unreal Skeletal Mesh and Animation Export (.psk and .psa) export script v0.0.1 --<br>
- NOTES:
- This script Exports To Unreal's PSK and PSA file formats for Skeletal Meshes and Animations. <br>
- This script DOES NOT support vertex animation! These require completely different file formats. <br>
- v0.0.1
- Initial version
- v0.0.2
- This version adds support for more than one material index!
"""
# DANGER! This code is complete garbage! Do not read!
# TODO: Throw some liscence junk in here: (maybe some GPL?)
# Liscence Junk: Use this script for whatever you feel like!
import Blender, time, os, math, sys as osSys, operator
from Blender import sys, Window, Draw, Scene, Mesh, Material, Texture, Image, Mathutils, Armature
from cStringIO import StringIO
from struct import pack, calcsize
# REFERENCE MATERIAL JUST IN CASE:
#
# U = x / sqrt(x^2 + y^2 + z^2)
# V = y / sqrt(x^2 + y^2 + z^2)
#
# Triangles specifed counter clockwise for front face
#
# defines for sizeofs: byte sizes of the packed structs below; these are
# written into VChunkHeader.DataSize so importers know each record's size
SIZE_FQUAT = 16
SIZE_FVECTOR = 12
SIZE_VJOINTPOS = 44
SIZE_ANIMINFOBINARY = 168
SIZE_VCHUNKHEADER = 32
SIZE_VMATERIAL = 88
SIZE_VBONE = 120
SIZE_FNAMEDBONEBINARY = 120
SIZE_VRAWBONEINFLUENCE = 12
SIZE_VQUATANIMKEY = 32
SIZE_VVERTEX = 16
SIZE_VPOINT = 12
SIZE_VTRIANGLE = 12
########################################################################
# Generic Object->Integer mapping
# the object must be usable as a dictionary key
class ObjMap:
    """Assigns a stable, dense integer id to each distinct object.

    Objects must be usable as dictionary keys.  Ids are handed out in
    first-seen order; items() yields the objects sorted by their id.
    """

    def __init__(self):
        self.dict = {}
        self.next = 0

    def get(self, obj):
        """Return the id for obj, assigning the next free id if unseen."""
        try:
            return self.dict[obj]
        except KeyError:
            new_id = self.next
            self.dict[obj] = new_id
            self.next = new_id + 1
            return new_id

    def items(self):
        """The mapped objects, ordered by assigned id."""
        getval = operator.itemgetter(0)
        getkey = operator.itemgetter(1)
        return map(getval, sorted(self.dict.items(), key=getkey))
########################################################################
# RG - UNREAL DATA STRUCTS - CONVERTED FROM C STRUCTS GIVEN ON UDN SITE
# provided here: http://udn.epicgames.com/Two/BinaryFormatSpecifications.html
class FQuat:
    """Quaternion (X, Y, Z, W); defaults to the identity rotation."""

    def __init__(self):
        self.X = 0.0
        self.Y = 0.0
        self.Z = 0.0
        self.W = 1.0

    def dump(self):
        """Serialize as four packed floats."""
        return pack('ffff', self.X, self.Y, self.Z, self.W)

    def __cmp__(self, other):
        # Python 2 comparison hook: first differing component decides
        return cmp(self.X, other.X) or cmp(self.Y, other.Y) \
            or cmp(self.Z, other.Z) or cmp(self.W, other.W)

    def __hash__(self):
        return hash(self.X) ^ hash(self.Y) ^ hash(self.Z) ^ hash(self.W)

    def __str__(self):
        return "[%f,%f,%f,%f](FQuat)" % (self.X, self.Y, self.Z, self.W)
class FVector:
    """3-component float vector with basic geometric operations."""

    def __init__(self, X=0.0, Y=0.0, Z=0.0):
        self.X = X
        self.Y = Y
        self.Z = Z

    def dump(self):
        """Serialize as three packed floats."""
        return pack('fff', self.X, self.Y, self.Z)

    def __cmp__(self, other):
        # Python 2 comparison hook: component-wise, X first
        return cmp(self.X, other.X) or cmp(self.Y, other.Y) or cmp(self.Z, other.Z)

    def __hash__(self):
        return hash(self.X) ^ hash(self.Y) ^ hash(self.Z)

    def dot(self, other):
        """Scalar (dot) product with *other*."""
        return self.X * other.X + self.Y * other.Y + self.Z * other.Z

    def cross(self, other):
        """Cross product self x other, as a new FVector."""
        cx = self.Y * other.Z - self.Z * other.Y
        cy = self.Z * other.X - self.X * other.Z
        cz = self.X * other.Y - self.Y * other.X
        return FVector(cx, cy, cz)

    def sub(self, other):
        """Component-wise difference self - other, as a new FVector."""
        return FVector(self.X - other.X, self.Y - other.Y, self.Z - other.Z)
class VJointPos:
    """Bone placement: orientation, position plus size/length metadata."""

    def __init__(self):
        self.Orientation = FQuat()
        self.Position = FVector()
        self.Length = 0.0
        self.XSize = 0.0
        self.YSize = 0.0
        self.ZSize = 0.0

    def dump(self):
        """Quaternion + position followed by the four packed size floats."""
        tail = pack('4f', self.Length, self.XSize, self.YSize, self.ZSize)
        return self.Orientation.dump() + self.Position.dump() + tail
class AnimInfoBinary:
    """PSA animation-sequence descriptor (one per exported action)."""

    def __init__(self):
        self.Name = ""   # length=64
        self.Group = ""  # length=64
        self.TotalBones = 0
        self.RootInclude = 0
        self.KeyCompressionStyle = 0
        self.KeyQuotum = 0
        self.KeyPrediction = 0.0
        self.TrackTime = 0.0
        self.AnimRate = 0.0
        self.StartBone = 0
        self.FirstRawFrame = 0
        self.NumRawFrames = 0

    def dump(self):
        """Pack in the fixed on-disk order; names are padded to 64 bytes."""
        return pack('64s64siiiifffiii', self.Name, self.Group,
                    self.TotalBones, self.RootInclude,
                    self.KeyCompressionStyle, self.KeyQuotum,
                    self.KeyPrediction, self.TrackTime, self.AnimRate,
                    self.StartBone, self.FirstRawFrame, self.NumRawFrames)
class VChunkHeader:
    """Generic PSK/PSA section header: id, magic, record size and count."""

    def __init__(self, name, type_size):
        self.ChunkID = name        # 20-byte section id string
        self.TypeFlag = 1999801    # format magic expected by the importer
        self.DataSize = type_size  # bytes per record in this section
        self.DataCount = 0         # record count; set before dumping

    def dump(self):
        return pack('20siii', self.ChunkID, self.TypeFlag, self.DataSize, self.DataCount)
class VMaterial:
    """PSK material entry; MaterialName is padded to 64 bytes on disk."""

    def __init__(self):
        self.MaterialName = ""  # length=64
        self.TextureIndex = 0
        self.PolyFlags = 0      # DWORD
        self.AuxMaterial = 0
        self.AuxFlags = 0       # DWORD
        self.LodBias = 0
        self.LodStyle = 0

    def dump(self):
        return pack('64siLiLii', self.MaterialName, self.TextureIndex,
                    self.PolyFlags, self.AuxMaterial, self.AuxFlags,
                    self.LodBias, self.LodStyle)
class VBone:
    """PSK reference-skeleton bone: name, hierarchy links and placement."""

    def __init__(self):
        self.Name = ""  # length = 64
        self.Flags = 0  # DWORD
        self.NumChildren = 0
        self.ParentIndex = 0
        self.BonePos = VJointPos()

    def dump(self):
        head = pack('64sLii', self.Name, self.Flags, self.NumChildren, self.ParentIndex)
        return head + self.BonePos.dump()
#same as above - whatever - this is how Epic does it...
class FNamedBoneBinary:
    """PSA bone record; same wire layout as VBone (Epic's duplication).

    IsRealBone is bookkeeping only (1 when the bone is actually used by
    the mesh and not a dummy) and is never serialized.
    """

    def __init__(self):
        self.Name = ""  # length = 64
        self.Flags = 0  # DWORD
        self.NumChildren = 0
        self.ParentIndex = 0
        self.BonePos = VJointPos()
        self.IsRealBone = 0

    def dump(self):
        head = pack('64sLii', self.Name, self.Flags, self.NumChildren, self.ParentIndex)
        return head + self.BonePos.dump()
class VRawBoneInfluence:
    """Links one mesh point to one bone with a blend weight."""

    def __init__(self):
        self.Weight = 0.0
        self.PointIndex = 0
        self.BoneIndex = 0

    def dump(self):
        return pack('fii', self.Weight, self.PointIndex, self.BoneIndex)
class VQuatAnimKey:
    """One animation key: bone position, orientation and key time."""

    def __init__(self):
        self.Position = FVector()
        self.Orientation = FQuat()
        self.Time = 0.0

    def dump(self):
        return self.Position.dump() + self.Orientation.dump() + pack('f', self.Time)
class VVertex:
    """PSK wedge: a point index paired with UV coords and material index."""

    def __init__(self):
        self.PointIndex = 0  # WORD
        self.U = 0.0
        self.V = 0.0
        self.MatIndex = 0    # BYTE
        self.Reserved = 0    # BYTE

    def dump(self):
        # the two literal zeros are padding words required by the layout
        return pack('HHffBBH', self.PointIndex, 0, self.U, self.V,
                    self.MatIndex, self.Reserved, 0)

    def __cmp__(self, other):
        # Python 2 comparison hook: field-by-field ordering
        return cmp(self.PointIndex, other.PointIndex) \
            or cmp(self.U, other.U) \
            or cmp(self.V, other.V) \
            or cmp(self.MatIndex, other.MatIndex) \
            or cmp(self.Reserved, other.Reserved)

    def __hash__(self):
        return hash(self.PointIndex) ^ hash(self.U) ^ hash(self.V) \
            ^ hash(self.MatIndex) ^ hash(self.Reserved)
class VPoint:
    """A single 3D mesh point (wraps an FVector)."""

    def __init__(self):
        self.Point = FVector()

    def dump(self):
        return self.Point.dump()

    def __cmp__(self, other):
        # delegate Python 2 ordering to the wrapped vector
        return cmp(self.Point, other.Point)

    def __hash__(self):
        return hash(self.Point)
class VTriangle:
    """A triangle referencing three wedges plus material/smoothing data."""

    def __init__(self):
        self.WedgeIndex0 = 0      # WORD
        self.WedgeIndex1 = 0      # WORD
        self.WedgeIndex2 = 0      # WORD
        self.MatIndex = 0         # BYTE
        self.AuxMatIndex = 0      # BYTE
        self.SmoothingGroups = 0  # DWORD

    def dump(self):
        return pack('HHHBBL', self.WedgeIndex0, self.WedgeIndex1,
                    self.WedgeIndex2, self.MatIndex, self.AuxMatIndex,
                    self.SmoothingGroups)
# END UNREAL DATA STRUCTS
########################################################################
#RG - helper class to handle the normal way the UT files are stored
#as sections consisting of a header and then a list of data structures
class FileSection:
    """A chunk header plus the list of records belonging to the section."""

    def __init__(self, name, type_size):
        self.Header = VChunkHeader(name, type_size)
        self.Data = []  # records; each must provide a dump() method

    def dump(self):
        """Header bytes followed by every record's bytes, in order."""
        data = self.Header.dump()
        for record in self.Data:
            data = data + record.dump()
        return data

    def UpdateHeader(self):
        """Refresh the header's record count from the data list."""
        self.Header.DataCount = len(self.Data)
class PSKFile:
    """In-memory PSK (skeletal mesh) file.

    Holds every section of the format and serializes the whole file with
    dump().  (Python 2 code: PrintOut uses print statements.)
    """

    def __init__(self):
        self.GeneralHeader = VChunkHeader("ACTRHEAD", 0)
        self.Points = FileSection("PNTS0000", SIZE_VPOINT) #VPoint
        self.Wedges = FileSection("VTXW0000", SIZE_VVERTEX) #VVertex
        self.Faces = FileSection("FACE0000", SIZE_VTRIANGLE) #VTriangle
        self.Materials = FileSection("MATT0000", SIZE_VMATERIAL) #VMaterial
        self.Bones = FileSection("REFSKELT", SIZE_VBONE) #VBone
        self.Influences = FileSection("RAWWEIGHTS", SIZE_VRAWBONEINFLUENCE) #VRawBoneInfluence

        #RG - this mapping is not dumped, but is used internally to store the new point indices
        # for vertex groups calculated during the mesh dump, so they can be used again
        # to dump bone influences during the armature dump
        #
        # the key in this dictionary is the VertexGroup/Bone Name, and the value
        # is a list of tuples containing the new point index and the weight, in that order
        #
        # Layout:
        # { groupname : [ (index, weight), ... ], ... }
        #
        # example:
        # { 'MyVertexGroup' : [ (0, 1.0), (5, 1.0), (3, 0.5) ] , 'OtherGroup' : [(2, 1.0)] }
        self.VertexGroups = {}

    def AddPoint(self, p):
        """Append a VPoint to the points section."""
        #print 'AddPoint'
        self.Points.Data.append(p)

    def AddWedge(self, w):
        """Append a VVertex (wedge) to the wedges section."""
        #print 'AddWedge'
        self.Wedges.Data.append(w)

    def AddFace(self, f):
        """Append a VTriangle to the faces section."""
        #print 'AddFace'
        self.Faces.Data.append(f)

    def AddMaterial(self, m):
        """Append a VMaterial to the materials section."""
        #print 'AddMaterial'
        self.Materials.Data.append(m)

    def AddBone(self, b):
        """Append a VBone to the reference-skeleton section."""
        #print 'AddBone [%s]: Position: (x=%f, y=%f, z=%f) Rotation=(%f,%f,%f,%f)' % (b.Name, b.BonePos.Position.X, b.BonePos.Position.Y, b.BonePos.Position.Z, b.BonePos.Orientation.X,b.BonePos.Orientation.Y,b.BonePos.Orientation.Z,b.BonePos.Orientation.W)
        self.Bones.Data.append(b)

    def AddInfluence(self, i):
        """Append a VRawBoneInfluence to the weights section."""
        #print 'AddInfluence'
        self.Influences.Data.append(i)

    def UpdateHeaders(self):
        """Refresh every section header's record count before dumping."""
        self.Points.UpdateHeader()
        self.Wedges.UpdateHeader()
        self.Faces.UpdateHeader()
        self.Materials.UpdateHeader()
        self.Bones.UpdateHeader()
        self.Influences.UpdateHeader()

    def dump(self):
        """Serialize the complete PSK file in the required section order."""
        self.UpdateHeaders()
        data = self.GeneralHeader.dump() + self.Points.dump() + self.Wedges.dump() + self.Faces.dump() + self.Materials.dump() + self.Bones.dump() + self.Influences.dump()
        return data

    def GetMatByIndex(self, mat_index):
        """Return the material at mat_index, creating placeholders lazily.

        A new "Mat%i" material is appended when the index is not yet
        present (note: it is appended, not inserted at mat_index).
        """
        if mat_index >= 0 and len(self.Materials.Data) > mat_index:
            return self.Materials.Data[mat_index]
        else:
            m = VMaterial()
            m.MaterialName = "Mat%i" % mat_index
            self.AddMaterial(m)
            return m

    def PrintOut(self):
        """Print a summary of all section sizes (Python 2 prints)."""
        print '--- PSK FILE EXPORTED ---'
        print 'point count: %i' % len(self.Points.Data)
        print 'wedge count: %i' % len(self.Wedges.Data)
        print 'face count: %i' % len(self.Faces.Data)
        print 'material count: %i' % len(self.Materials.Data)
        print 'bone count: %i' % len(self.Bones.Data)
        # (sic) "inlfuence" typo is in the original output string
        print 'inlfuence count: %i' % len(self.Influences.Data)
        print '-------------------------'
# PSA FILE NOTES FROM UDN:
#
# The raw key array holds all the keys for all the bones in all the specified sequences,
# organized as follows:
# For each AnimInfoBinary's sequence there are [Number of bones] times [Number of frames keys]
# in the VQuatAnimKeys, laid out as tracks of [numframes] keys for each bone in the order of
# the bones as defined in the array of FnamedBoneBinary in the PSA.
#
# Once the data from the PSK (now digested into native skeletal mesh) and PSA (digested into
# a native animation object containing one or more sequences) are associated together at runtime,
# bones are linked up by name. Any bone in a skeleton (from the PSK) that finds no partner in
# the animation sequence (from the PSA) will assume its reference pose stance ( as defined in
# the offsets & rotations that are in the VBones making up the reference skeleton from the PSK)
class PSAFile:
    """In-memory PSA (skeletal animation) file.

    Fixes over the original: the class previously defined dump() twice;
    the first (dead, shadowed) copy referenced the nonexistent attribute
    ``self.Generalheader`` and would have raised AttributeError if it
    were ever the active definition.  It has been removed.  PrintOut now
    uses the single-argument print form, which behaves identically under
    Python 2 and also parses under Python 3.
    """

    def __init__(self):
        self.GeneralHeader = VChunkHeader("ANIMHEAD", 0)
        self.Bones = FileSection("BONENAMES", SIZE_FNAMEDBONEBINARY) #FNamedBoneBinary
        self.Animations = FileSection("ANIMINFO", SIZE_ANIMINFOBINARY) #AnimInfoBinary
        self.RawKeys = FileSection("ANIMKEYS", SIZE_VQUATANIMKEY) #VQuatAnimKey
        # this will take the format of key=Bone Name, value = (BoneIndex, Bone Object)
        # THIS IS NOT DUMPED
        self.BoneLookup = {}

    def AddBone(self, b):
        """Append an FNamedBoneBinary to the bone-names section."""
        #print "AddBone: " + b.Name
        self.Bones.Data.append(b)

    def AddAnimation(self, a):
        """Append an AnimInfoBinary sequence descriptor."""
        #print "AddAnimation: %s, TotalBones: %i, AnimRate: %f, NumRawFrames: %i, TrackTime: %f" % (a.Name, a.TotalBones, a.AnimRate, a.NumRawFrames, a.TrackTime)
        self.Animations.Data.append(a)

    def AddRawKey(self, k):
        """Append a VQuatAnimKey to the raw-keys section."""
        #print "AddRawKey [%i]: Time: %f, Quat: x=%f, y=%f, z=%f, w=%f, Position: x=%f, y=%f, z=%f" % (len(self.RawKeys.Data), k.Time, k.Orientation.X, k.Orientation.Y, k.Orientation.Z, k.Orientation.W, k.Position.X, k.Position.Y, k.Position.Z)
        self.RawKeys.Data.append(k)

    def UpdateHeaders(self):
        """Refresh every section header's record count before dumping."""
        self.Bones.UpdateHeader()
        self.Animations.UpdateHeader()
        self.RawKeys.UpdateHeader()

    def GetBoneByIndex(self, bone_index):
        """Return the bone at bone_index, or None when out of range."""
        if bone_index >= 0 and len(self.Bones.Data) > bone_index:
            return self.Bones.Data[bone_index]

    def IsEmpty(self):
        """True when there is nothing meaningful to export."""
        return (len(self.Bones.Data) == 0 or len(self.Animations.Data) == 0)

    def StoreBone(self, b):
        """Register a bone in the lookup without assigning it an index yet."""
        self.BoneLookup[b.Name] = [-1, b]

    def UseBone(self, bone_name):
        """Mark a stored bone as used, lazily assigning its dump index."""
        if bone_name in self.BoneLookup:
            bone_data = self.BoneLookup[bone_name]

            # first use: give the bone the next index and add it for dumping
            if bone_data[0] == -1:
                bone_data[0] = len(self.Bones.Data)
                self.AddBone(bone_data[1])

            return bone_data[0]

    def GetBoneByName(self, bone_name):
        """Return the stored bone object for bone_name, or None."""
        if bone_name in self.BoneLookup:
            bone_data = self.BoneLookup[bone_name]
            return bone_data[1]

    def GetBoneIndex(self, bone_name):
        """Return the assigned index for bone_name (-1 if unused), or None."""
        if bone_name in self.BoneLookup:
            bone_data = self.BoneLookup[bone_name]
            return bone_data[0]

    def dump(self):
        """Serialize the complete PSA file in the required section order."""
        self.UpdateHeaders()
        data = self.GeneralHeader.dump() + self.Bones.dump() + self.Animations.dump() + self.RawKeys.dump()
        return data

    def PrintOut(self):
        """Print a summary of all section sizes."""
        print('--- PSA FILE EXPORTED ---')
        print('bone count: %i' % len(self.Bones.Data))
        print('animation count: %i' % len(self.Animations.Data))
        print('rawkey count: %i' % len(self.RawKeys.Data))
        print('-------------------------')
####################################
# helpers to create bone structs
def make_vbone(name, parent_index, child_count, orientation_quat, position_vect):
    """Build a VBone from a bone's name, hierarchy links and transform."""
    bone = VBone()
    bone.Name = name
    bone.ParentIndex = parent_index
    bone.NumChildren = child_count
    bone.BonePos.Orientation = orientation_quat
    pos = bone.BonePos.Position
    pos.X = position_vect.x
    pos.Y = position_vect.y
    pos.Z = position_vect.z

    # Length/XSize/YSize/ZSize are left at their defaults; the original
    # author noted these values seem to be ignored by the importer.
    return bone
def make_namedbonebinary(name, parent_index, child_count, orientation_quat, position_vect, is_real):
    """Build an FNamedBoneBinary; is_real marks bones actually in the mesh."""
    bone = FNamedBoneBinary()
    bone.Name = name
    bone.ParentIndex = parent_index
    bone.NumChildren = child_count
    bone.BonePos.Orientation = orientation_quat
    pos = bone.BonePos.Position
    pos.X = position_vect.x
    pos.Y = position_vect.y
    pos.Z = position_vect.z
    bone.IsRealBone = is_real
    return bone
##################################################
def is_1d_face(blender_face):
    """RG - True when the triangle degenerates to a line, i.e. any two of its
    three vertices share the same coordinates (no valid face normal exists)."""
    first = blender_face.v[0].co
    second = blender_face.v[1].co
    third = blender_face.v[2].co
    return first == second or second == third or third == first
##################################################
# Actual object parsing functions
def parse_meshes(blender_meshes, psk_file):
    """Dump every Blender mesh object into the PSK structures.

    Builds the point (vertex), wedge (vertex + UV) and face lists, and fills
    ``psk_file.VertexGroups`` so the later armature parse can map bones to
    vertex influences.  Only fully triangulated meshes are accepted; a
    RuntimeError is raised for any non-triangular face.
    """
    print "----- parsing meshes -----"
    for current_obj in blender_meshes:
        current_mesh = current_obj.getData()
        # Get the world transform for the object
        object_mat = current_obj.mat
        points = ObjMap()
        wedges = ObjMap()
        discarded_face_count = 0
        for current_face in current_mesh.faces:
            if len(current_face.v) != 3:
                raise RuntimeError("Non-triangular face (%i)" % len(current_face.v))
            # RG - apparently blender sometimes has problems with quad-to-triangle
            # conversion and creates faces that have only TWO points (one point is
            # in the vertex list twice).  We can't get a real face normal for a
            # LINE -- we need a plane -- so before adding the face, ensure it is
            # actually planar; otherwise discard it and notify the user after
            # dumping the rest of the faces.
            if not is_1d_face(current_face):
                wedge_list = []
                vect_list = []
                # get or create the current material
                m = psk_file.GetMatByIndex(current_face.mat)
                print 'material: %i' % (current_face.mat)
                for i in range(3):
                    vert = current_face.v[i]
                    if len(current_face.uv) != 3:
                        # face is missing UV coordinates -- write 0,0
                        uv = [0.0, 0.0]
                    else:
                        uv = list(current_face.uv[i])
                    # flip V coordinate because UEd requires it and DOESN'T flip
                    # it on its own like it does with the mesh Y coordinates
                    # (this is otherwise known as MAGIC-2)
                    uv[1] = 1.0 - uv[1]
                    # RE - append untransformed vector (for the normal calc below)
                    # TODO: convert to Blender.Mathutils
                    vect_list.append(FVector(vert.co.x, vert.co.y, vert.co.z))
                    # Transform position for export
                    vpos = vert.co * object_mat
                    # Create the point
                    p = VPoint()
                    p.Point.X = vpos.x
                    p.Point.Y = vpos.y
                    p.Point.Z = vpos.z
                    # Create the wedge (deduplicated through the ObjMap)
                    w = VVertex()
                    w.MatIndex = current_face.mat
                    w.PointIndex = points.get(p) # get index from map
                    w.U = uv[0]
                    w.V = uv[1]
                    wedge_index = wedges.get(w)
                    wedge_list.append(wedge_index)
                # Determine face vertex order.
                # get normal from blender
                no = current_face.no
                # TODO: convert to Blender.Mathutils
                # convert to FVector
                norm = FVector(no[0], no[1], no[2])
                # Calculate the normal of the face in blender order
                tnorm = vect_list[1].sub(vect_list[0]).cross(vect_list[2].sub(vect_list[1]))
                # RE - dot the normal from blender order against the blender
                # normal; this gives the product of the two vectors' lengths
                # along the blender normal axis -- all that matters is the sign
                dot = norm.dot(tnorm)
                tri = VTriangle()
                # RE - magic: if the dot product > 0, order the vertices 2, 1, 0;
                # if < 0, order them 0, 1, 2; if exactly 0 blender's normal is
                # coplanar with the face and we cannot deduce which side of the
                # face is the outside of the mesh
                if (dot > 0):
                    (tri.WedgeIndex2, tri.WedgeIndex1, tri.WedgeIndex0) = wedge_list
                elif (dot < 0):
                    (tri.WedgeIndex0, tri.WedgeIndex1, tri.WedgeIndex2) = wedge_list
                else:
                    raise RuntimeError("normal vector coplanar with face! points:", current_face.v[0].co, current_face.v[1].co, current_face.v[2].co)
                tri.MatIndex = current_face.mat
                psk_file.AddFace(tri)
            else:
                discarded_face_count = discarded_face_count + 1
        for point in points.items():
            psk_file.AddPoint(point)
        for wedge in wedges.items():
            psk_file.AddWedge(wedge)
        # RG - if we happened upon any non-planar faces above that we've
        # discarded, let the user know here in case they want to investigate
        if discarded_face_count > 0:
            print "INFO: Discarded %i non-planar faces." % (discarded_face_count)
        # RG - walk through the vertex groups and find the indexes into the PSK
        # points array for them, then store that index and the weight as a tuple
        # in a new list of verts for the group that we can look up later by bone
        # name, since Blender matches verts to bones for influences by naming
        # the VertexGroup the same as the bone
        vertex_groups = current_mesh.getVertGroupNames()
        for group in vertex_groups:
            verts = current_mesh.getVertsFromGroup(group, 1)
            vert_list = []
            for vert_data in verts:
                vert_index = vert_data[0]
                vert_weight = vert_data[1]
                vert = current_mesh.verts[vert_index]
                vpos = vert.co * object_mat
                p = VPoint()
                p.Point.X = vpos.x
                p.Point.Y = vpos.y
                p.Point.Z = vpos.z
                point_index = points.get(p)
                v_item = (point_index, vert_weight)
                vert_list.append(v_item)
            psk_file.VertexGroups[group] = vert_list
def make_fquat(bquat):
    """Convert a Blender quaternion into an FQuat, flipping handedness for
    UT by negating the vector part (rotate in the other direction)."""
    converted = FQuat()
    converted.X = -bquat.x
    converted.Y = -bquat.y
    converted.Z = -bquat.z
    converted.W = bquat.w
    return converted
# TODO: remove this 1am hack
# Module-level running bone index shared (via `global`) by parse_bone() and
# friends.  Starts at 1 -- presumably slot 0 is reserved for the armature
# dummy/root bone, see parse_armature() (TODO confirm).
nbone = 1
def parse_bone(blender_bone, psk_file, psa_file, parent_id, is_root_bone, parent_mat):
    """Recursively dump one bone (and its children) into the PSK and PSA files.

    Also emits the vertex influences for the bone, using the VertexGroups
    table collected during the mesh parse (Blender links verts to bones by
    giving the vertex group the same name as the bone).
    """
    global nbone # look it's evil!
    print 'blender bone name: ' + blender_bone.name
    if blender_bone.hasChildren():
        child_count = len(blender_bone.children)
    else:
        child_count = 0
    if (parent_mat):
        # child bone: bring head/tail/rotation into the parent's space
        head = blender_bone.head['BONESPACE'] * parent_mat
        tail = blender_bone.tail['BONESPACE'] * parent_mat
        rot_mat = blender_bone.matrix['BONESPACE'] * parent_mat.rotationPart()
        quat = make_fquat(rot_mat.toQuat())
    else:
        head = blender_bone.head['BONESPACE']
        tail = blender_bone.tail['BONESPACE']
        quat = make_fquat(blender_bone.matrix['BONESPACE'].toQuat())
    bone_vect = tail-head  # NOTE(review): unused -- kept for reference
    final_parent_id = parent_id
    # RG/RE - if we are not separated by a small distance, create a dummy bone
    # for the displacement.  Only needed for root bones, since UT assumes a
    # connected skeleton; from here down the chain we just use "tail" as an
    # endpoint.  Currently DISABLED via the if(0) guard below; the original
    # condition was:
    #if(head.length > 0.001 and is_root_bone == 1):
    if(0):
        pb = make_vbone("dummy_" + blender_bone.name, parent_id, 1, FQuat(), head)
        psk_file.AddBone(pb)
        pbb = make_namedbonebinary("dummy_" + blender_bone.name, parent_id, 1, FQuat(), head, 0)
        psa_file.StoreBone(pbb)
        final_parent_id = nbone
        nbone = nbone + 1
        tail = tail-head
    my_id = nbone
    pb = make_vbone(blender_bone.name, final_parent_id, child_count, quat, tail)
    psk_file.AddBone(pb)
    pbb = make_namedbonebinary(blender_bone.name, final_parent_id, child_count, quat, tail, 1)
    psa_file.StoreBone(pbb)
    nbone = nbone + 1
    # RG - dump influences for this bone -- use the data we collected in the
    # mesh dump phase to map our bones to vertex groups
    if blender_bone.name in psk_file.VertexGroups:
        vertex_list = psk_file.VertexGroups[blender_bone.name]
        for vertex_data in vertex_list:
            point_index = vertex_data[0]
            vertex_weight = vertex_data[1]
            influence = VRawBoneInfluence()
            influence.Weight = vertex_weight
            influence.BoneIndex = my_id
            influence.PointIndex = point_index
            psk_file.AddInfluence(influence)
    # recursively dump child bones
    if blender_bone.hasChildren():
        for current_child_bone in blender_bone.children:
            parse_bone(current_child_bone, psk_file, psa_file, my_id, 0, None)
def make_armature_bone(blender_object, psk_file, psa_file):
    """Create a dummy bone that offsets the armature origin for one armature.

    The armature object's own transform (rotation + translation) becomes the
    bone, and all of the armature's top-level bones are counted as children.
    """
    armature = blender_object.getData()
    # screw efficiency! just calc this again
    bones = [x for x in armature.bones.values() if not x.hasParent()]
    child_count = len(bones)
    object_matrix = blender_object.mat
    quat = make_fquat(object_matrix.toQuat())
    tail = object_matrix.translationPart()
    # for psk file
    root_bone = make_vbone(blender_object.name, 0, child_count, quat, tail)
    psk_file.AddBone(root_bone)
    print "blender_object.name:",blender_object.name
    # for psa file (IsRealBone=0: this is a generated dummy, not a real bone)
    root_bone_binary = make_namedbonebinary(blender_object.name, 0, child_count, quat, tail, 0)
    psa_file.StoreBone(root_bone_binary)
def parse_armature(blender_armature, psk_file, psa_file):
    """Dump every armature object: one dummy offset bone per armature, then
    its top-level bones recursively via parse_bone()."""
    print "----- parsing armature -----"
    # count top level bones first, across all armatures
    # ("screw efficiency again - ohnoz it will take dayz to runz0r!")
    child_count = 0
    for current_obj in blender_armature:
        current_armature = current_obj.getData()
        bones = [x for x in current_armature.bones.values() if not x.hasParent()]
        child_count += len(bones)
    # make root bone -- DISABLED: magic 0-sized root bone for UT where all
    # armature dummy bones would attach
    '''
    pb = make_vbone("", 0, child_count, FQuat(), Blender.Mathutils.Vector(0,0,0))
    psk_file.AddBone(pb)
    pbb = make_namedbonebinary("", 0, child_count, FQuat(), Blender.Mathutils.Vector(0,0,0), 0)
    psa_file.StoreBone(pbb)
    '''
    for current_obj in blender_armature:
        print 'current armature name: ' + current_obj.name
        current_armature = current_obj.getData()
        make_armature_bone(current_obj, psk_file, psa_file)
        # we dont want children here -- only the top level bones of the
        # armature itself; the child bones are dumped recursively as we dump
        # these bones
        bones = [x for x in current_armature.bones.values() if not x.hasParent()]
        for current_bone in bones:
            parse_bone(current_bone, psk_file, psa_file, 0, 1, current_obj.mat)
# get blender objects by type
def get_blender_objects(objects, type):
    """Return (as a new list) the objects whose getType() equals *type*,
    preserving the input order."""
    matching = []
    for candidate in objects:
        if candidate.getType() == type:
            matching.append(candidate)
    return matching
#strips current extension (if any) from filename and replaces it with extension passed in
def make_filename_ext(filename, extension):
    """Replace the extension of *filename* with *extension*.

    Fix: the previous implementation used ``filename.rfind('.')``, so a dot
    inside a directory name (e.g. ``"my.dir/model"``) was mistaken for an
    extension separator and the path was truncated to ``"my.psk"``.
    ``os.path.splitext`` only splits at the final path component.
    """
    import os.path  # local import: the script header (not visible here) may not import os
    root, _old_ext = os.path.splitext(filename)
    return root + extension
# returns the quaternion Grassman product a*b
# this is the same as the rotation a(b(x))
# (ie. the same as B*A if A and B are matrices representing
# the rotations described by quaternions a and b)
def grassman(a, b):
    """Hamilton/Grassmann product of two quaternions with (w, x, y, z)
    components; returns a new Blender.Mathutils.Quaternion whose constructor
    takes the components in (w, x, y, z) order."""
    return Blender.Mathutils.Quaternion(
        a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z,
        a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y,
        a.w*b.y - a.x*b.z + a.y*b.w + a.z*b.x,
        a.w*b.z + a.x*b.y - a.y*b.x + a.z*b.w)
def parse_animation(blender_scene, psa_file):
    """Dump every NLA action as one PSA animation.

    For each action, every frame between the first and last keyframe is
    sampled; for every armature and every pose bone (in PSA bone order) one
    VQuatAnimKey is emitted per sampled frame.
    """
    print "----- parsing animation -----"
    blender_context = blender_scene.getRenderingContext()
    anim_rate = blender_context.framesPerSec()
    export_objects = blender_scene.objects
    blender_armatures = get_blender_objects(export_objects, 'Armature')
    # running index of the first raw frame belonging to the current action
    cur_frame_index = 0
    for act in Armature.NLA.GetActions().values():
        action_name = act.getName()
        action_keyframes = act.getFrameNumbers()
        start_frame = min(action_keyframes)
        end_frame = max(action_keyframes)
        # sample every frame in the keyed range, not just the keyframes
        scene_frames = xrange(start_frame, end_frame+1)
        frame_count = len(scene_frames)
        anim = AnimInfoBinary()
        anim.Name = action_name
        anim.Group = "" #wtf is group?
        anim.NumRawFrames = frame_count
        anim.AnimRate = anim_rate
        anim.FirstRawFrame = cur_frame_index
        count_previous_keys = len(psa_file.RawKeys.Data)  # NOTE(review): unused
        print "----- Action: %s" % action_name;
        unique_bone_indexes = {}
        for obj in blender_armatures:
            current_armature = obj.getData()
            act.setActive(obj)
            # bone lookup table: rest-pose bone by name
            bones_lookup = {}
            for bone in current_armature.bones.values():
                bones_lookup[bone.name] = bone
            frame_count = len(scene_frames)
            pose_data = obj.getPose()
            # these must be ordered in the order the bones will show up in the
            # PSA file!  UseBone() returns (and registers) that PSA index.
            ordered_bones = {}
            ordered_bones = sorted([(psa_file.UseBone(x.name), x) for x in pose_data.bones.values()], key=operator.itemgetter(0))
            #############################
            # ORDERED FRAME, BONE
            for i in range(frame_count):
                frame = scene_frames[i]
                if frame_count > i+1:
                    next_frame = scene_frames[i+1]
                else:
                    next_frame = -1  # sentinel: last frame of the action
                Blender.Set('curframe', frame)
                cur_frame_index = cur_frame_index + 1
                for bone_data in ordered_bones:
                    bone_index = bone_data[0]
                    pose_bone = bone_data[1]
                    blender_bone = bones_lookup[pose_bone.name]
                    # just need the total unique bones used, later for this
                    # AnimInfoBinary
                    unique_bone_indexes[bone_index] = bone_index
                    head = blender_bone.head['BONESPACE']
                    tail = blender_bone.tail['BONESPACE']
                    quat = blender_bone.matrix['BONESPACE'].toQuat()
                    # compose the rest-pose rotation with the pose rotation
                    quat = grassman(quat, pose_bone.quat)
                    tail = (pose_bone.quat * (tail-head)) + (head) + pose_bone.loc
                    # no parent? apply armature transform -- DISABLED
                    '''
                    if not blender_bone.hasParent():
                        parent_mat = obj.mat
                        head = head * parent_mat
                        tail = tail * parent_mat
                        quat = grassman(parent_mat.toQuat(), quat)
                    '''
                    vkey = VQuatAnimKey()
                    vkey.Position.X = tail.x
                    vkey.Position.Y = tail.y
                    vkey.Position.Z = tail.z
                    vkey.Orientation = make_fquat(quat)
                    # time from now till next frame = diff / framesPerSec
                    if next_frame >= 0:
                        diff = next_frame - frame
                    else:
                        diff = 1.0
                    vkey.Time = float(diff)/float(blender_context.framesPerSec())
                    psa_file.AddRawKey(vkey)
            # done looping frames
        # done looping armatures
        # continue adding animInfoBinary counts here
        anim.TotalBones = len(unique_bone_indexes)
        anim.TrackTime = float(frame_count) / anim.AnimRate
        psa_file.AddAnimation(anim)
def fs_callback(filename):
    """File-selector callback: export the current scene to <filename>.psk
    (mesh + skeleton) and <filename>.psa (animations).

    The current frame is saved before the animation parse walks the timeline
    and is restored on every exit path (including exceptions).
    """
    print "======EXPORTING TO UNREAL SKELETAL MESH FORMATS========\r\n"
    psk = PSKFile()
    psa = PSAFile()
    # sanity check -- this should already have the extension, but just in
    # case, we'll give it one if it doesn't
    psk_filename = make_filename_ext(filename, '.psk')
    # make the psa filename
    psa_filename = make_filename_ext(filename, '.psa')
    blender_meshes = []
    blender_armature = []
    current_scene = Blender.Scene.GetCurrent()
    current_scene.makeCurrent()
    cur_frame = Blender.Get('curframe') #store current frame before we start walking them during animation parse
    objects = current_scene.getChildren()
    blender_meshes = get_blender_objects(objects, 'Mesh')
    blender_armature = get_blender_objects(objects, 'Armature')
    try:
        #######################
        # STEP 1: MESH DUMP
        # we build the vertexes, wedges, and faces in here, as well as a
        # vertexgroup lookup table for the armature parse
        parse_meshes(blender_meshes, psk)
    except:
        Blender.Set('curframe', cur_frame) #set frame back to original frame
        print "Exception during Mesh Parse"
        raise
    try:
        #######################
        # STEP 2: ARMATURE DUMP
        # IMPORTANT: do this AFTER parsing meshes -- we need to use the vertex
        # group data from the mesh parse in here to generate bone influences
        parse_armature(blender_armature, psk, psa)
    except:
        Blender.Set('curframe', cur_frame) #set frame back to original frame
        print "Exception during Armature Parse"
        raise
    try:
        #######################
        # STEP 3: ANIMATION DUMP
        # IMPORTANT: do AFTER parsing bones -- we need to do bone lookups in
        # here during animation frames
        parse_animation(current_scene, psa)
    except:
        Blender.Set('curframe', cur_frame) #set frame back to original frame
        print "Exception during Animation Parse"
        raise
    # reset current frame
    Blender.Set('curframe', cur_frame) #set frame back to original frame
    ##########################
    # FILE WRITE
    #RG - dump psk file
    psk.PrintOut()
    file = open(psk_filename, "wb")
    file.write(psk.dump())
    file.close()
    print 'Successfully Exported File: ' + psk_filename
    #RG - dump psa file (only when at least one animation was collected)
    if not psa.IsEmpty():
        psa.PrintOut()
        file = open(psa_filename, "wb")
        file.write(psa.dump())
        file.close()
        print 'Successfully Exported File: ' + psa_filename
    else:
        print 'No Animations to Export'
if __name__ == '__main__':
    # Pop Blender's file selector; the export itself happens in fs_callback().
    Window.FileSelector(fs_callback, 'Export PSK/PSA File', sys.makename(ext='.psk'))
    #fs_callback('c:\\ChainBenderSideTurret.psk')
| 29.964725 | 252 | 0.650642 |
ec6a498a22c6120f1221f757f2216fa47a9aa4aa | 2,526 | py | Python | 2017/plot_stats/sloc_runtime.py | max-f/advent-of-code | 3c0ee995f7c0691418ecb86cbfa201b3d03131b8 | [
"MIT"
] | null | null | null | 2017/plot_stats/sloc_runtime.py | max-f/advent-of-code | 3c0ee995f7c0691418ecb86cbfa201b3d03131b8 | [
"MIT"
] | null | null | null | 2017/plot_stats/sloc_runtime.py | max-f/advent-of-code | 3c0ee995f7c0691418ecb86cbfa201b3d03131b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import os
import subprocess
import time
from typing import List
import matplotlib.pyplot as plt
import seaborn as sns
def create_timings() -> List[float]:
    """Run each day's solution (d01.py .. d25.py) from the parent directory
    and return the wall-clock runtime of each script in seconds.

    Fixes over the previous version:
    * the return annotation said ``List[int]`` although float seconds are
      returned
    * uses ``time.perf_counter()`` (monotonic, high resolution) instead of
      ``time.time()`` for measuring elapsed time
    * restores the working directory even if a subprocess run raises
    """
    cwd = os.getcwd()
    os.chdir('..')
    timings: List[float] = []
    try:
        for day in range(1, 26):
            start = time.perf_counter()
            subprocess.run(['python', 'd{:02}.py'.format(day)],
                           stdout=subprocess.DEVNULL)
            timings.append(time.perf_counter() - start)
    finally:
        # Always return to the original directory; callers rely on the cwd.
        os.chdir(cwd)
    return timings
def read_timings() -> List[float]:
    """Load previously measured runtimes from ``timings.txt``.

    File layout: first line is a timestamp, last line is ``str(list_of_floats)``
    as written by write_timings().  Falls back to re-measuring (and caching)
    via create_timings()/write_timings() when the file is absent or malformed.

    Fix: the previous bare ``except:`` swallowed *every* exception, including
    KeyboardInterrupt and programming errors; only file/parse errors are
    caught now.
    """
    try:
        with open('timings.txt', 'r') as f:
            input_str = f.read()
        lines = input_str.split('\n')
        print('Using timings from {}'.format(lines[0]))
        # Last line looks like "[0.12, 0.34, ...]" -> strip brackets/spaces
        # from each comma-separated piece and parse floats.
        timings = lines[-1].split(',')
        timings = [float(x.strip('[ ]')) for x in timings]
        return timings
    except (OSError, ValueError, IndexError):
        timings = create_timings()
        write_timings(timings)
        return timings
def write_timings(timings: List[int]) -> None:
    """Persist *timings* to ``timings.txt``: a timestamp line followed by
    the repr of the list (the format read_timings() parses back)."""
    print('Writing timings to timings.txt')
    stamp = datetime.datetime.now()
    contents = '{}\n{}'.format(stamp, timings)
    with open('timings.txt', 'w') as f:
        f.write(contents)
def create_plot(xs: List[str], slocs: List[int], runtimes: List[int]) -> None:
    """Render the SLOC-vs-runtime chart and save it to img/sloc_runtime.png.

    SLOC is drawn as bars on a linear left axis; runtime as a line on a
    logarithmic right (twin) axis.  Requires the img/ directory to exist.
    """
    sns.set_style('dark', {'axes.facecolor': 'eeeeee', 'axes.labelcolor': '555555',
                           'xtick.color': '555555', 'ytick.color': '555555'})
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # axes[0]: SLOC bars (linear); axes[-1]: runtime line (log), sharing x
    axes = [ax, ax.twinx()]
    axes[0].set_yscale('linear')
    axes[-1].set_yscale('log')
    axes[0].set_xlabel("Day")
    axes[0].set_ylabel("SLOC")
    axes[-1].set_ylabel("Runtime in secs")
    # NOTE(review): positional (x, y) args to sns.barplot are deprecated in
    # newer seaborn releases -- confirm against the pinned seaborn version
    sns.barplot(xs, slocs, ax=axes[0], alpha=0.6)
    p1, = axes[-1].plot(xs, runtimes, 'o-', lw=1.5, color='#080808', label='Runtime')
    lns = [p1]
    ax.legend(handles=lns, loc='upper left')
    plt.savefig('img/sloc_runtime.png', dpi=fig.dpi, bbox_inches='tight')
    plt.show()
def main():
    """Load cached runtimes (or measure and cache them), then render the
    SLOC/runtime chart for days 01-25."""
    day_labels = ['{:02}'.format(day) for day in range(1, 26)]
    # Values taken from CLI program SLOCCount
    sloc_counts = [34, 24, 60, 28, 38, 46, 101, 40, 38, 48, 33, 19, 33, 33, 34,
                   31, 26, 106, 66, 51, 45, 68, 40, 43, 66]
    if os.path.isfile('timings.txt'):
        runtimes = read_timings()
    else:
        runtimes = create_timings()
        write_timings(runtimes)
    create_plot(day_labels, sloc_counts, runtimes)
if __name__ == '__main__':
    # Script entry point: only run when executed directly, not on import.
    main()

# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4
| 28.066667 | 86 | 0.591449 |
1b9f9bdfa3fab4b38860404ffc20a7348fc2f8d0 | 565 | py | Python | src/minterpy/polynomials/__init__.py | karanprime/minterpy | 75d8976b2ddcf79ebaa29b4cb80691ca02a6d180 | [
"MIT"
] | 13 | 2021-11-30T17:52:45.000Z | 2021-12-09T10:05:20.000Z | src/minterpy/polynomials/__init__.py | karanprime/minterpy | 75d8976b2ddcf79ebaa29b4cb80691ca02a6d180 | [
"MIT"
] | 2 | 2021-11-30T18:31:22.000Z | 2022-02-10T10:13:37.000Z | src/minterpy/polynomials/__init__.py | karanprime/minterpy | 75d8976b2ddcf79ebaa29b4cb80691ca02a6d180 | [
"MIT"
] | 6 | 2021-11-30T18:17:26.000Z | 2022-02-18T17:38:27.000Z | """
The submodule `polynomials` is part of `minterpy`.
It contains concrete implementations for multivariate canonical, Newton and Lagrange polynomial base, respectively.
"""
__all__ = []

# Re-export the public names of each polynomial-basis submodule and
# aggregate their __all__ lists into this package's __all__.
from . import canonical_polynomial  # noqa
from .canonical_polynomial import *  # noqa

__all__ += canonical_polynomial.__all__

from . import newton_polynomial  # noqa
from .newton_polynomial import *  # noqa

__all__ += newton_polynomial.__all__

from . import lagrange_polynomial  # noqa
from .lagrange_polynomial import *  # noqa

__all__ += lagrange_polynomial.__all__
| 24.565217 | 115 | 0.778761 |
57aa0cd3da7e1b482602b0dedc54371a1c432836 | 13,127 | py | Python | ctapipe/core/tests/test_component.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/core/tests/test_component.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/core/tests/test_component.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 1 | 2020-05-16T10:23:51.000Z | 2020-05-16T10:23:51.000Z | from abc import abstractmethod, ABC
import pytest
from traitlets import Float, TraitError, Int
from traitlets.config.loader import Config
import astropy.units as u
import warnings
from ctapipe.core import Component
def test_non_abstract_children():
    """ check that we can find all constructable children """
    from ctapipe.core import non_abstract_children

    class AbstractBase(ABC):
        @abstractmethod
        def method(self):
            pass

    class Child1(AbstractBase):
        def method(self):
            print("method of Child1")

    class Child2(AbstractBase):
        def method(self):
            print("method of Child2")

    class GrandChild(Child2):
        def method(self):
            print("method of GrandChild")

    class AbstractChild(AbstractBase):
        pass

    kids = non_abstract_children(AbstractBase)
    assert Child1 in kids
    assert Child2 in kids
    assert GrandChild in kids
    # AbstractChild never implements the abstract method, so it stays
    # abstract and must not be reported as constructable
    assert AbstractChild not in kids
def test_get_config_from_hierarchy():
    """check find_config_in_hierarchy resolves nested component config,
    with and without the root component present in the Config"""
    from ctapipe.core.component import find_config_in_hierarchy

    class Bottom(Component):
        val = Float(0).tag(config=True)

    class Middle(Component):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.bottom = Bottom(parent=self)

    class Top(Component):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.middle = Middle(parent=self)

    # test with root present
    c = Config({"Top": {"Middle": {"Bottom": {"val": 5}}}})
    t = Top(config=c)
    val = find_config_in_hierarchy(t.middle, "Bottom", "val")
    assert val == 5

    # test with root not present
    c = Config({"Middle": {"Bottom": {"val": 5}}})
    t = Top(config=c)
    val = find_config_in_hierarchy(t.middle, "Bottom", "val")
    assert val == 5
# --- shared fixture components used by the tests below ---

class SubComponent(Component):
    """ An Example Component, this is the help text"""

    value = Float(default_value=-1.0, help="float parameter").tag(config=True)


class ExampleComponent(Component):
    """ An Example Component, this is the help text"""

    param = Float(default_value=1.0, help="float parameter").tag(config=True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # nested child component, used by the current-config tests
        self.sub = SubComponent(parent=self)


class ExampleSubclass1(ExampleComponent):
    """ a subclass of ExampleComponent"""

    pass


class ExampleSubclass2(ExampleComponent):
    """ Another ExampleComponent """

    description = "A shorter description"
    # overrides the inherited trait with a different default (3.0 vs 1.0)
    param = Float(default_value=3.0, help="float parameter").tag(config=True)
    # trait that only exists on this subclass
    extra = Float(default_value=5.0, help="float parameter").tag(config=True)
def test_component_is_abstract():
    """ check that we can make an abstract component """

    class AbstractComponent(Component):
        @abstractmethod
        def test(self):
            pass

    # abstract components must not be instantiable
    with pytest.raises(TypeError):
        AbstractComponent()


def test_component_simple():
    """
    very basic test to construct a component and check
    that it's traits work correctly
    """
    comp = ExampleComponent()
    assert comp.has_trait("param") is True
    comp.param = 1.2

    # assigning a non-float value must be rejected by the trait machinery
    with pytest.raises(TraitError):
        comp.param = "badvalue"


def test_component_kwarg_setting():
    """ check that we can construct a component by setting traits via kwargs """
    comp = ExampleComponent(param=3)
    assert comp.param == 3

    # Invalid type
    with pytest.raises(TraitError):
        comp = ExampleComponent(param="badvalue")

    # Invalid traitlet
    with pytest.raises(TraitError):
        # the current traitlets version already warns about this
        # will be raising an error in the future, but we want the error
        # now, filter the warning here to not clutter the log
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)
            comp = ExampleComponent(incorrect="wrong")


def test_help():
    """ check that component help strings are generated correctly """
    help_msg = ExampleComponent.class_get_help()
    assert "Default: 1.0" in help_msg
def test_config():
    """ check that components can be constructed by config dict """
    config = Config()
    config["ExampleComponent"] = Config()
    config["ExampleComponent"]["param"] = 199.0
    comp = ExampleComponent(config=config)
    assert comp.param == 199.0


def test_config_baseclass():
    """ check that parent and subclass configuration works """
    # a value configured on the base class applies to both subclasses
    config = Config()
    config["ExampleComponent"] = Config()
    config["ExampleComponent"]["param"] = 199.0
    comp1 = ExampleSubclass1(config=config)
    assert comp1.param == 199.0
    comp2 = ExampleSubclass2(config=config)
    assert comp2.param == 199.0


def test_config_subclass1():
    """check sub-class config"""
    # configuring only the subclass must NOT affect the base class
    config = Config()
    config["ExampleSubclass1"] = Config()
    config["ExampleSubclass1"]["param"] = 199.0
    comp = ExampleComponent(config=config)
    assert comp.param == 1.0


def test_config_subclass2():
    """check another sub-class config"""
    config = Config()
    config["ExampleSubclass2"] = Config()
    config["ExampleSubclass2"]["param"] = 199.0
    comp = ExampleComponent(config=config)
    assert comp.param == 1.0


def test_config_sibling1():
    """ check sibling config """
    # configuring one sibling leaves the other at its own default
    config = Config()
    config["ExampleSubclass1"] = Config()
    config["ExampleSubclass1"]["param"] = 199.0
    comp1 = ExampleSubclass1(config=config)
    assert comp1.param == 199.0
    comp2 = ExampleSubclass2(config=config)
    assert comp2.param == 3.0


def test_config_sibling2():
    """ check sibling config """
    config = Config()
    config["ExampleSubclass2"] = Config()
    config["ExampleSubclass2"]["param"] = 199.0
    comp1 = ExampleSubclass1(config=config)
    assert comp1.param == 1.0
    comp2 = ExampleSubclass2(config=config)
    assert comp2.param == 199.0


def test_config_baseclass_then_subclass():
    """ check base and subclass config """
    # the most specific (subclass) setting wins over the base-class one
    config = Config()
    config["ExampleComponent"] = Config()
    config["ExampleComponent"]["param"] = 199.0
    config["ExampleSubclass1"] = Config()
    config["ExampleSubclass1"]["param"] = 229.0
    comp = ExampleSubclass1(config=config)
    assert comp.param == 229.0


def test_config_subclass_then_baseclass():
    """ check subclass and base config """
    # the order the keys were added to the Config must not matter
    config = Config()
    config["ExampleSubclass1"] = Config()
    config["ExampleSubclass1"]["param"] = 229.0
    config["ExampleComponent"] = Config()
    config["ExampleComponent"]["param"] = 199.0
    comp = ExampleSubclass1(config=config)
    assert comp.param == 229.0
def test_config_override():
    """ check that we can override a trait set in the config """
    config = Config()
    config["ExampleComponent"] = Config()
    config["ExampleComponent"]["param"] = 199.0
    # an explicit keyword argument beats the configured value
    comp = ExampleComponent(config=config, param=229.0)
    assert comp.param == 229.0


def test_config_override_subclass():
    """ check that we can override a trait set in the config """
    config = Config()
    config["ExampleComponent"] = Config()
    config["ExampleComponent"]["param"] = 199.0
    comp = ExampleSubclass1(config=config, param=229.0)
    assert comp.param == 229.0


def test_extra():
    """ check that traits are settable """
    comp = ExampleSubclass2(extra=229.0)
    assert comp.has_trait("extra") is True
    assert comp.extra == 229.0


def test_extra_config():
    """ check setting trait via config """
    config = Config()
    config["ExampleSubclass2"] = Config()
    config["ExampleSubclass2"]["extra"] = 229.0
    comp = ExampleSubclass2(config=config)
    assert comp.extra == 229.0


def test_unknown_traitlet_raises():
    """ check that setting an incorrect trait raises an exception """
    with pytest.raises(TraitError):
        # the current traitlets version already warns about this
        # will be raising an error in the future, but we want the error
        # now, filter the warning here to not clutter the log
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)
            ExampleSubclass1(extra=229.0)


def test_extra_config_missing():
    """
    check that setting an incorrect trait via config also raises
    an exception
    """
    config = Config()
    config["ExampleSubclass1"] = Config()
    config["ExampleSubclass1"]["extra"] = 199.0
    # an unknown config key only warns; the trait simply never exists
    with pytest.warns(UserWarning):
        comp = ExampleSubclass1(config=config)
    assert comp.has_trait("extra") is False
    with pytest.raises(AttributeError):
        assert comp.extra == 229.0
def test_default():
    """ check default values work"""
    comp = ExampleComponent()
    assert comp.param == 1.0


def test_default_subclass():
    """ check default values work in subclasses"""
    comp = ExampleSubclass1()
    assert comp.param == 1.0


def test_default_subclass_override():
    """ check overrides work in subclasses"""
    comp = ExampleSubclass2()
    assert comp.param == 3.0


def test_change_default():
    """ check we can change a default value"""
    # restore the class-level default afterwards so other tests are unaffected
    old_default = ExampleComponent.param.default_value
    ExampleComponent.param.default_value = 199.0
    comp = ExampleComponent()
    assert comp.param == 199.0
    ExampleComponent.param.default_value = old_default


def test_change_default_subclass():
    """ check we can change a default value in subclass """
    # subclass 1 inherits the base trait, so it sees the changed default
    old_default = ExampleComponent.param.default_value
    ExampleComponent.param.default_value = 199.0
    comp = ExampleSubclass1(
    )
    assert comp.param == 199.0
    ExampleComponent.param.default_value = old_default


def test_change_default_subclass_override():
    """ check override default value """
    old_default = ExampleComponent.param.default_value
    ExampleComponent.param.default_value = 199.0
    comp = ExampleSubclass2()
    assert comp.param == 3.0  # No change as it is a seperate traitlet object
    ExampleComponent.param.default_value = old_default


def test_help_changed_default():
    """ check that the help text is updated if the default is changed """
    old_default = ExampleComponent.param.default_value
    ExampleComponent.param.default_value = 199.0
    help_msg = ExampleComponent.class_get_help()
    assert "Default: 199.0" in help_msg
    ExampleComponent.param.default_value = old_default
def test_non_abstract_subclasses():
    """non_abstract_subclasses() is a helper function:
    it might just be nice, in case a person wants to see the subclasses
    in a python session.
    Can also be helpful in parametrized tests, to make sure all
    sublcasses are being tested.
    """
    assert "ExampleSubclass1" in ExampleComponent.non_abstract_subclasses()


def test_from_name():
    """ Make sure one can construct a Component subclass by name"""
    subclass = ExampleComponent.from_name("ExampleSubclass1")
    assert isinstance(subclass, ExampleSubclass1)
    subclass = ExampleComponent.from_name("ExampleSubclass2")
    assert isinstance(subclass, ExampleSubclass2)


def test_from_name_config():
    """ make sure one can construct a Component subclass by name + config"""
    config = Config({"ExampleComponent": {"param": 229.0}})
    subclass = ExampleComponent.from_name("ExampleSubclass1", config=config)
    assert subclass.param == 229.0


def test_component_current_config():
    """ make sure one can get the full current configuration"""
    comp = ExampleComponent()
    full_config = comp.get_current_config()
    assert "ExampleComponent" in full_config
    assert "param" in full_config["ExampleComponent"]
    assert full_config["ExampleComponent"]["param"] == 1.0


def test_component_html_repr():
    """ check the HTML repr for Jupyter notebooks """
    comp = ExampleComponent()
    html = comp._repr_html_()
    # smoke test only: the repr must produce some non-trivial markup
    assert len(html) > 10
def test_telescope_component():
    """check TelescopeComponent subclasses can be built via from_name,
    which requires passing the subarray through"""
    from ctapipe.core import TelescopeComponent
    from ctapipe.instrument import SubarrayDescription, TelescopeDescription

    # minimal single-telescope subarray fixture
    subarray = SubarrayDescription(
        "test",
        tel_positions={1: [0, 0, 0] * u.m},
        tel_descriptions={1: TelescopeDescription.from_name("LST", "LSTCam")},
    )

    class Base(TelescopeComponent):
        pass

    class Sub(Base):
        pass

    assert isinstance(Base.from_name("Sub", subarray=subarray), Sub)
def test_full_config():
    """check get_current_config includes nested sub-components and
    round-trips through a Config object"""

    class SubComponent(Component):
        param = Int(default_value=3).tag(config=True)

    class MyComponent(Component):
        val = Int(default_value=42).tag(config=True)

        def __init__(self, config=None, parent=None):
            super().__init__(config=config, parent=parent)
            self.sub = SubComponent(parent=self)

    comp = MyComponent()
    assert comp.get_current_config() == {
        "MyComponent": {"val": 42, "SubComponent": {"param": 3}}
    }

    # test round tripping: dump the current config, feed it back in, and the
    # reconstructed component must report the identical config
    comp = MyComponent()
    comp.val = 10
    comp.sub.param = -1
    dict_config = comp.get_current_config()
    config = Config(dict_config)
    comp_from_config = MyComponent(config=config)
    assert dict_config == comp_from_config.get_current_config()
| 29.90205 | 80 | 0.681801 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.